Nov 25 18:12:48 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 18:12:48 crc restorecon[4674]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 18:12:48 crc restorecon[4674]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc 
restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 18:12:48 crc 
restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:48 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 
18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 18:12:49 crc 
restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 18:12:49 crc restorecon[4674]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 25 18:12:50 crc kubenswrapper[4926]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 18:12:50 crc kubenswrapper[4926]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 25 18:12:50 crc kubenswrapper[4926]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 18:12:50 crc kubenswrapper[4926]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 18:12:50 crc kubenswrapper[4926]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 25 18:12:50 crc kubenswrapper[4926]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.071454 4926 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076722 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076751 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076761 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076772 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076781 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076791 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076799 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076808 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076817 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076825 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076834 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076842 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076851 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076860 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076868 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076876 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076884 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076893 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076901 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076910 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076918 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076926 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076935 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076943 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076951 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076959 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076971 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.076982 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077008 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077018 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077027 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077036 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077044 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077053 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077062 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077070 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077078 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077087 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077095 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077104 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077112 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077121 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077129 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077137 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077146 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077154 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077165 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077177 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077188 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077198 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077208 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077217 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077227 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077235 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077246 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077257 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077267 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077276 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077285 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077293 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077303 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077312 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077320 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077329 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077338 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077348 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077361 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077397 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077406 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077415 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.077424 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077592 4926 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077609 4926 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077624 4926 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077636 4926 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077648 4926 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077658 4926 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077670 4926 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077684 4926 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077695 4926 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077704 4926 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077715 4926 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077725 4926 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077735 4926 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077745 4926 flags.go:64] FLAG: --cgroup-root=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077754 4926 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077764 4926 flags.go:64] FLAG: --client-ca-file=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077774 4926 flags.go:64] FLAG: --cloud-config=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077783 4926 flags.go:64] FLAG: --cloud-provider=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077793 4926 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077804 4926 flags.go:64] FLAG: --cluster-domain=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077814 4926 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077824 4926 flags.go:64] FLAG: --config-dir=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077834 4926 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077844 4926 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077856 4926 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077865 4926 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077875 4926 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077886 4926 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077896 4926 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077906 4926 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077916 4926 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077926 4926 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077935 4926 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077956 4926 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077966 4926 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077976 4926 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077985 4926 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.077995 4926 flags.go:64] FLAG: --enable-server="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078004 4926 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078016 4926 flags.go:64] FLAG: --event-burst="100"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078027 4926 flags.go:64] FLAG: --event-qps="50"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078037 4926 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078046 4926 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078057 4926 flags.go:64] FLAG: --eviction-hard=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078068 4926 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078078 4926 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078087 4926 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078098 4926 flags.go:64] FLAG: --eviction-soft=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078108 4926 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078117 4926 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078127 4926 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078137 4926 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078149 4926 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078159 4926 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078169 4926 flags.go:64] FLAG: --feature-gates=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078180 4926 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078190 4926 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078201 4926 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078210 4926 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078220 4926 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078260 4926 flags.go:64] FLAG: --help="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078272 4926 flags.go:64] FLAG: --hostname-override=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078283 4926 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078293 4926 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078303 4926 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078313 4926 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078322 4926 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078332 4926 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078341 4926 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078351 4926 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078360 4926 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078397 4926 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078410 4926 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078420 4926 flags.go:64] FLAG: --kube-reserved=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078430 4926 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078439 4926 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078449 4926 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078459 4926 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078469 4926 flags.go:64] FLAG: --lock-file=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078481 4926 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078491 4926 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078500 4926 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078514 4926 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078525 4926 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078535 4926 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078544 4926 flags.go:64] FLAG: --logging-format="text"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078553 4926 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078564 4926 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078574 4926 flags.go:64] FLAG: --manifest-url=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078583 4926 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078595 4926 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078605 4926 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078616 4926 flags.go:64] FLAG: --max-pods="110"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078625 4926 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078635 4926 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078645 4926 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078654 4926 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078664 4926 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078674 4926 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078684 4926 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078703 4926 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078713 4926 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078723 4926 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078733 4926 flags.go:64] FLAG: --pod-cidr=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078743 4926 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078757 4926 flags.go:64] FLAG: --pod-manifest-path=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078766 4926 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078775 4926 flags.go:64] FLAG: --pods-per-core="0"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078785 4926 flags.go:64] FLAG: --port="10250"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078795 4926 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078805 4926 flags.go:64] FLAG: --provider-id=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078814 4926 flags.go:64] FLAG: --qos-reserved=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078824 4926 flags.go:64] FLAG: --read-only-port="10255"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078833 4926 flags.go:64] FLAG: --register-node="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078843 4926 flags.go:64] FLAG: --register-schedulable="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078854 4926 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078870 4926 flags.go:64] FLAG: --registry-burst="10"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078879 4926 flags.go:64] FLAG: --registry-qps="5"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078889 4926 flags.go:64] FLAG: --reserved-cpus=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078899 4926 flags.go:64] FLAG: --reserved-memory=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078910 4926 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078920 4926 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078930 4926 flags.go:64] FLAG: --rotate-certificates="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078939 4926 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078949 4926 flags.go:64] FLAG: --runonce="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078958 4926 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078968 4926 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078978 4926 flags.go:64] FLAG: --seccomp-default="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078987 4926 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.078997 4926 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079007 4926 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079017 4926 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079027 4926 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079036 4926 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079046 4926 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079055 4926 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079075 4926 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079085 4926 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079095 4926 flags.go:64] FLAG: --system-cgroups=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079105 4926 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079120 4926 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079129 4926 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079139 4926 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079151 4926 flags.go:64] FLAG: --tls-min-version=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079160 4926 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079170 4926 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079180 4926 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079190 4926 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079200 4926 flags.go:64] FLAG: --v="2"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079212 4926 flags.go:64] FLAG: --version="false"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079224 4926 flags.go:64] FLAG: --vmodule=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079238 4926 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.079248 4926 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079490 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079502 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079511 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079521 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079529 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079538 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079550 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079561 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079571 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079580 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079588 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079597 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079605 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079614 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079622 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079634 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079645 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079655 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079665 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079674 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079683 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079691 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079701 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079709 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079718 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079726 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079734 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079743 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079752 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079760 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079769 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079777 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079786 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079794 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079804 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079813 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079821 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079829 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079838 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079846 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079856 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079864 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079872 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079880 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079889 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079898 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079906 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079916 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079924 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079932 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079941 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079950 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079961 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079971 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079980 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079989 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.079997 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080005 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080013 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080022 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080030 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080039 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080047 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080055 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080064 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080072 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080081 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080089 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080098 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080106 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.080117 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.080142 4926 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.094172 4926 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.094224 4926 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094419 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094434 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094444 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094457 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094472 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094484 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094495 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094505 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094513 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094523 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094533 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094543 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094552 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094561 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094572 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094584 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094594 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094603 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094612 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094620 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094629 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094638 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094646 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094655 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094666 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094679 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094688 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094697 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094706 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094714 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094722 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094732 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094740 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094749 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094759 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094768 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094777 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094786 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094795 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094804 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094812 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094821 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094829 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094838 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094846 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094854 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094863 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094871 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094880 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094888 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094896 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094904 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094913 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094921 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094930 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094938 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094946 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094955 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094963 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094972 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094980 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094989 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.094998 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095006 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095014 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095023 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095031 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095039 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095050 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095061 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095074 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.095088 4926 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095342 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095357 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095407 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095421 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095431 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095440 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095449 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095458 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095466 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095476 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095485 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095493 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095502 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095511 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095519 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095528 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095537 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095545 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095555 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095563 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095572 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095583 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095594 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095605 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095614 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095623 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095632 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095641 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095650 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095659 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095668 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095677 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095688 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095700 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095711 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095720 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095730 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095738 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095839 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095852 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095861 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095869 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095878 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095889 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095900 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095909 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095919 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095928 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095937 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095946 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095955 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095963 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095972 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095981 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095990 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.095999 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096008 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096016 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096025 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096033 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096042 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096050 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096058 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096066 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096075 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096083 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096092 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096100 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096108 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096117 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.096127 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.096140 4926 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.096477 4926 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.105830 4926 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.105969 4926 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.108617 4926 server.go:997] "Starting client certificate rotation"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.108674 4926 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.109407 4926 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-16 01:33:56.197889929 +0000 UTC
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.109514 4926 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1231h21m6.088381513s for next certificate rotation
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.135927 4926 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.139627 4926 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.156603 4926 log.go:25] "Validated CRI v1 runtime API"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.200214 4926 log.go:25] "Validated CRI v1 image API"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.204770 4926 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.211712 4926 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-18-07-33-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.211767 4926 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.242609 4926 manager.go:217] Machine: {Timestamp:2025-11-25 18:12:50.237948799 +0000 UTC m=+0.623462434 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:26522ffc-f7a5-422b-aa8b-57e952227505 BootID:c1388c23-14d1-4724-ab12-311163f5cca5 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:38:10:fc Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:38:10:fc Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:21:e5:2f Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:92:90:18 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:b8:09:06 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:55:b5:a8 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:12:1f:14:bb:bf:d2 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:c6:28:ee:5f:da:29 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.242870 4926 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.243144 4926 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.246428 4926 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.246593 4926 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.246631 4926 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.248438 4926 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.248456 4926 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.249033 4926 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.249056 4926 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.249246 4926 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.249317 4926 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.253989 4926 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.254011 4926 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.254039 4926 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.254055 4926 kubelet.go:324] "Adding apiserver pod source"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.254083 4926 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.257819 4926 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.259530 4926 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.262426 4926 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.264057 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264193 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264226 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264239 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264248 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264264 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264275 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.264186 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264285 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264367 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264403 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264414 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264428 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.264438 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.264108 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused
Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.264545 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.266106 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.268308 4926 server.go:1280] "Started kubelet"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.269449 4926 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.269806 4926 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.269911 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused
Nov 25 18:12:50 crc systemd[1]: Started Kubernetes Kubelet.
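Every "connection refused" to https://api-int.crc.testing:6443 above has the same cause: the kubelet comes up before the kube-apiserver static pod it is about to launch, so its informers, the node lease, and the CSINode check all fail until the apiserver starts listening and the reflectors' backoff retries succeed. A quick way to confirm whether such errors are still transient is to probe the same endpoint; a minimal sketch, with only the host and port copied from the log and everything else illustrative:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Endpoint taken from the reflector errors in the log above.
        const addr = "api-int.crc.testing:6443"
        conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
        if err != nil {
            // While the apiserver static pod is not yet listening, this
            // prints the same "connect: connection refused" seen above.
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()
        fmt.Println("apiserver port is accepting connections")
    }

If the dial keeps failing long after kubelet startup, the problem is no longer the expected bootstrap ordering but the apiserver itself.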
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.270878 4926 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.278497 4926 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.278507 4926 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.279463 4926 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.279793 4926 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 04:30:00.252409142 +0000 UTC
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.279870 4926 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 826h17m9.972543334s for next certificate rotation
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.280069 4926 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.280356 4926 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.280092 4926 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.280208 4926 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.281020 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused
Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.281152 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.281853 4926 factory.go:55] Registering systemd factory
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.281892 4926 factory.go:221] Registration of the systemd container factory successfully
Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.282326 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="200ms"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.282696 4926 factory.go:153] Registering CRI-O factory
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.282725 4926 factory.go:221] Registration of the crio container factory successfully
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.282792 4926 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.282824 4926 factory.go:103] Registering Raw factory
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.282843 4926 manager.go:1196] Started watching for new ooms in manager
Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.282295 4926 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.212:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b527bb21b2409 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:12:50.268259337 +0000 UTC m=+0.653772962,LastTimestamp:2025-11-25 18:12:50.268259337 +0000 UTC m=+0.653772962,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.283858 4926 manager.go:319] Starting recovery of all containers
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288660 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288735 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288750 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288764 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288779 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288791 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288804 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288815 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288829 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288842 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288854 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288867 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288879 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288894 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288907 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288918 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288930 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288942 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288954 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288965 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288981 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.288993 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289006 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289017 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289030 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289042 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289057 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289112 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289128 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289140 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289151 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289164 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289175 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289185 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289197 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289210 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289224 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289238 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289249 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289262 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289273 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289284 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289295 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289306 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289320 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289331 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289342 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289354 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289366 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289396 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289406 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289417 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289435 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289447 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289460 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289474 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289486 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289498 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289509 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289520 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289534 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289545 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289559 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289571 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289582 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289594 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289606 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289618 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289630 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289643 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289656 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289667 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289688 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289701 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289713 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289724 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289736 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289747 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289759 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289771 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289783 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289794 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289851 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289864 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289877 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289889 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289902 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289913 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289924 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289936 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.289994 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290007 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290020 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290034 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290046 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290058 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290068 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290079 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290092 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290103 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290118 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290130 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290141 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290153 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290169 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290181 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290192 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290204 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290221 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290232 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290278 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290291 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290304 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290316 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290329 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290342 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290356 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290447 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290464 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290487 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290508 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290531 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290547 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290561 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290574 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290585 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290598 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290610 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290621 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290632 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290643 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290655 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290666 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290678 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290690 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290701 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290712 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290724 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290736 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290748 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290759 4926 reconstruct.go:130]
"Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290771 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290782 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290794 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290807 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290818 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290830 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290841 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290860 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290873 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290893 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290908 4926 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290925 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290943 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290958 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290973 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.290987 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.291004 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.291020 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.291039 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.291054 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.291068 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.291084 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294186 4926 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294271 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294292 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294316 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294335 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294356 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294383 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294399 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294424 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294440 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294462 4926 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294475 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294492 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294510 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294523 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294544 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294561 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294572 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294590 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294602 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294614 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294635 4926 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294651 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294676 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294690 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294701 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294722 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294736 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294757 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294774 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294792 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294813 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294825 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294843 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294855 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294866 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294882 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294894 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294910 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294921 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294932 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294946 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294958 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294969 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294984 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.294996 4926 reconstruct.go:97] "Volume reconstruction finished" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.295005 4926 reconciler.go:26] "Reconciler: start to sync state" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.306331 4926 manager.go:324] Recovery completed Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.316332 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.318600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.318734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.318818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.319791 4926 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.319818 4926 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.319861 4926 state_mem.go:36] "Initialized new in-memory state store" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.324874 4926 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.327153 4926 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.327272 4926 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.327393 4926 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.328004 4926 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.329065 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.329185 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.336897 4926 policy_none.go:49] "None policy: Start" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.338664 4926 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.338738 4926 state_mem.go:35] "Initializing new in-memory state store" Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.380820 4926 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.400522 4926 manager.go:334] "Starting Device Plugin manager" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.400883 4926 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.400901 4926 server.go:79] "Starting device plugin registration server" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.401565 4926 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.401587 4926 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.402166 4926 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.402345 4926 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.402417 4926 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.408703 4926 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.428881 4926 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 18:12:50 crc kubenswrapper[4926]: 
I1125 18:12:50.429032 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.430924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.430967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.430977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.431113 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.431478 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.431511 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432224 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432232 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432274 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432434 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.432469 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433357 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433514 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433633 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433803 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.433853 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434243 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434300 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434310 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434415 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434494 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434519 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434573 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.434583 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435421 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435728 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.435869 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.436497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.436522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.436532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.482812 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="400ms" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497363 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497472 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497560 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497654 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497738 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497821 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.497911 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498011 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498131 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498224 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498343 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498443 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498526 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.498623 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.501907 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.503154 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.503188 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.503201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.503226 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.503678 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.212:6443: connect: connection refused" node="crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600059 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600147 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600199 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600240 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600281 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600337 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600366 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600448 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600474 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600503 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600532 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600588 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600615 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600926 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600890 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600910 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601364 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601476 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601500 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601499 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601531 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601538 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601534 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601671 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.601810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.600859 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.704194 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.705800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.705867 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.705880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.705914 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.706421 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.212:6443: connect: connection refused" node="crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.758731 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.766032 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.770509 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.802861 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: I1125 18:12:50.807980 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.821687 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-a3da381cdc61473d288969017c52ae12efae44f24a12471e2dd92df0416e5146 WatchSource:0}: Error finding container a3da381cdc61473d288969017c52ae12efae44f24a12471e2dd92df0416e5146: Status 404 returned error can't find the container with id a3da381cdc61473d288969017c52ae12efae44f24a12471e2dd92df0416e5146 Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.822277 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-506f7b93385a3fe9a7989e36eaf523bdaec5f9622d7232db2204480f4143d0dc WatchSource:0}: Error finding container 506f7b93385a3fe9a7989e36eaf523bdaec5f9622d7232db2204480f4143d0dc: Status 404 returned error can't find the container with id 506f7b93385a3fe9a7989e36eaf523bdaec5f9622d7232db2204480f4143d0dc Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.826367 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-063c7d1631fb2e6ac8eec43d2a3e00ebe31ba8770d3f804cb4093b0cfd8d6425 WatchSource:0}: Error finding container 063c7d1631fb2e6ac8eec43d2a3e00ebe31ba8770d3f804cb4093b0cfd8d6425: Status 404 returned error can't find the container with id 063c7d1631fb2e6ac8eec43d2a3e00ebe31ba8770d3f804cb4093b0cfd8d6425 Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.829710 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-53ea6f3638c1405d378a878163e3688e06a197fe3a88540ecbdd22c3f267a7df WatchSource:0}: Error finding container 53ea6f3638c1405d378a878163e3688e06a197fe3a88540ecbdd22c3f267a7df: Status 404 returned error can't find the container with id 53ea6f3638c1405d378a878163e3688e06a197fe3a88540ecbdd22c3f267a7df Nov 25 18:12:50 crc kubenswrapper[4926]: W1125 18:12:50.834688 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-cd9d4fec48167076b1a216c3025063fec0a28f7840779935e590e6c7e58cd2dc WatchSource:0}: Error finding container cd9d4fec48167076b1a216c3025063fec0a28f7840779935e590e6c7e58cd2dc: Status 404 returned error can't find the container with id cd9d4fec48167076b1a216c3025063fec0a28f7840779935e590e6c7e58cd2dc Nov 25 18:12:50 crc kubenswrapper[4926]: E1125 18:12:50.884394 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="800ms" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.106525 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.108139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.108187 4926 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.108200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.108228 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.108665 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.212:6443: connect: connection refused" node="crc" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.271110 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.332347 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cd9d4fec48167076b1a216c3025063fec0a28f7840779935e590e6c7e58cd2dc"} Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.333442 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"063c7d1631fb2e6ac8eec43d2a3e00ebe31ba8770d3f804cb4093b0cfd8d6425"} Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.334849 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"53ea6f3638c1405d378a878163e3688e06a197fe3a88540ecbdd22c3f267a7df"} Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.336014 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"506f7b93385a3fe9a7989e36eaf523bdaec5f9622d7232db2204480f4143d0dc"} Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.336991 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"a3da381cdc61473d288969017c52ae12efae44f24a12471e2dd92df0416e5146"} Nov 25 18:12:51 crc kubenswrapper[4926]: W1125 18:12:51.656039 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.656133 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.685388 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: 
connection refused" interval="1.6s" Nov 25 18:12:51 crc kubenswrapper[4926]: W1125 18:12:51.790747 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.790826 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:51 crc kubenswrapper[4926]: W1125 18:12:51.828690 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.828773 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:51 crc kubenswrapper[4926]: W1125 18:12:51.847198 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.847353 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.908956 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.913963 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.914009 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.914022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:51 crc kubenswrapper[4926]: I1125 18:12:51.914052 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:12:51 crc kubenswrapper[4926]: E1125 18:12:51.914825 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.212:6443: connect: connection refused" node="crc" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.271779 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.341270 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6" exitCode=0 Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.341362 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.341554 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.342938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.342976 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.342990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.343160 4926 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="57246ff31c386a94e48b44815c9ea9f7b1b744d3e13cb1d275945ae3f1e3a923" exitCode=0 Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.343317 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"57246ff31c386a94e48b44815c9ea9f7b1b744d3e13cb1d275945ae3f1e3a923"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.343513 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.344798 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345217 4926 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784" exitCode=0 Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345279 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345407 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345787 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.345828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.346182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.346211 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.346221 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.348604 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.348650 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.348665 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.348664 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.348678 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.350565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.350601 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.350611 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.351780 4926 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69" exitCode=0 Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.351914 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.351926 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69"} Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.353013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.353036 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:52 crc kubenswrapper[4926]: I1125 18:12:52.353047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.065123 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.271304 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:53 crc kubenswrapper[4926]: E1125 18:12:53.286089 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="3.2s" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.359093 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.359107 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"85e75f31c3c3023d42d8d5329429e64064b7b566343f87541e184fcb17fa37a3"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.360267 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.360314 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.360327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.362097 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.362174 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.362188 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.362248 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.364081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.364135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.364149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.367491 4926 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef" exitCode=0 Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.367528 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.367625 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.368522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.368559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.368572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.373592 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374020 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374066 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374084 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374097 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b"} Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374721 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.374764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.524433 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.524485 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.526247 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.526274 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.526285 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.526306 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:12:53 crc kubenswrapper[4926]: E1125 18:12:53.526691 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.212:6443: connect: connection refused" node="crc" Nov 25 18:12:53 crc kubenswrapper[4926]: I1125 18:12:53.533549 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:53 crc kubenswrapper[4926]: E1125 18:12:53.705605 4926 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.212:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b527bb21b2409 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:12:50.268259337 +0000 UTC m=+0.653772962,LastTimestamp:2025-11-25 18:12:50.268259337 +0000 UTC m=+0.653772962,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:12:53 crc kubenswrapper[4926]: W1125 18:12:53.724737 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:53 crc kubenswrapper[4926]: E1125 18:12:53.724837 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:53 crc 
kubenswrapper[4926]: W1125 18:12:53.784531 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:53 crc kubenswrapper[4926]: E1125 18:12:53.784599 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:53 crc kubenswrapper[4926]: W1125 18:12:53.908410 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.212:6443: connect: connection refused Nov 25 18:12:53 crc kubenswrapper[4926]: E1125 18:12:53.908494 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.212:6443: connect: connection refused" logger="UnhandledError" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.377603 4926 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12" exitCode=0 Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.377664 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12"} Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.377760 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.378603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.378632 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.378649 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.385927 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.386027 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.386132 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199"} Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.386242 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.386351 4926 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.386517 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.386535 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.387423 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.387463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.387476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.387913 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.387977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388357 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388963 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.388973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:54 crc kubenswrapper[4926]: I1125 18:12:54.895629 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395231 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839"} Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395280 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395300 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c"} Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395332 4926 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e"} Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395360 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2"} Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395421 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.395518 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.396294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.396329 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.396344 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.396696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.396728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.396740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.588066 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.588463 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.590350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.590478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:55 crc kubenswrapper[4926]: I1125 18:12:55.590508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.403128 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.403175 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.403173 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164"} Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.403424 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.404061 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.404087 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.404096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.404910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.404938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.404947 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.727507 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.729257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.729321 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.729334 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:56 crc kubenswrapper[4926]: I1125 18:12:56.729400 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.237270 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.405538 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.405591 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.406786 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.406811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.406823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.406824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.406948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:57 crc kubenswrapper[4926]: I1125 18:12:57.406967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:58 crc kubenswrapper[4926]: I1125 18:12:58.488099 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 25 18:12:58 crc kubenswrapper[4926]: I1125 18:12:58.488303 
4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:58 crc kubenswrapper[4926]: I1125 18:12:58.489347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:58 crc kubenswrapper[4926]: I1125 18:12:58.489414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:58 crc kubenswrapper[4926]: I1125 18:12:58.489425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.211636 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.211821 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.212853 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.212877 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.212886 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.333312 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.333542 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.334625 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.334653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:12:59 crc kubenswrapper[4926]: I1125 18:12:59.334663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:00 crc kubenswrapper[4926]: E1125 18:13:00.408833 4926 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 18:13:01 crc kubenswrapper[4926]: I1125 18:13:01.957316 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:13:01 crc kubenswrapper[4926]: I1125 18:13:01.957447 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:13:01 crc kubenswrapper[4926]: I1125 18:13:01.958407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:01 crc kubenswrapper[4926]: I1125 18:13:01.958438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:01 crc kubenswrapper[4926]: I1125 18:13:01.958447 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:02 crc kubenswrapper[4926]: I1125 18:13:02.212312 4926 patch_prober.go:28] 
interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 18:13:02 crc kubenswrapper[4926]: I1125 18:13:02.212458 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 18:13:03 crc kubenswrapper[4926]: I1125 18:13:03.328167 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 18:13:03 crc kubenswrapper[4926]: I1125 18:13:03.328413 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:13:03 crc kubenswrapper[4926]: I1125 18:13:03.329664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:03 crc kubenswrapper[4926]: I1125 18:13:03.329725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:03 crc kubenswrapper[4926]: I1125 18:13:03.329734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.226640 4926 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.226723 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.338444 4926 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]log ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]etcd ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 25 18:13:04 crc 
kubenswrapper[4926]: [+]poststarthook/priority-and-fairness-filter ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-apiextensions-informers ok Nov 25 18:13:04 crc kubenswrapper[4926]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld Nov 25 18:13:04 crc kubenswrapper[4926]: [-]poststarthook/crd-informer-synced failed: reason withheld Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-system-namespaces-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 25 18:13:04 crc kubenswrapper[4926]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Nov 25 18:13:04 crc kubenswrapper[4926]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/bootstrap-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/start-kube-aggregator-informers ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-registration-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-discovery-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]autoregister-completion ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-openapi-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 25 18:13:04 crc kubenswrapper[4926]: livez check failed Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.338503 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.423219 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.431529 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199" exitCode=255 Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.431575 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199"} Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.431750 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.432907 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.432938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.432949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:04 crc kubenswrapper[4926]: I1125 18:13:04.433588 4926 scope.go:117] "RemoveContainer" containerID="62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199" Nov 25 18:13:05 crc kubenswrapper[4926]: I1125 18:13:05.436259 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 18:13:05 crc kubenswrapper[4926]: I1125 18:13:05.438785 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665"} Nov 25 18:13:05 crc kubenswrapper[4926]: I1125 18:13:05.438960 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:13:05 crc kubenswrapper[4926]: I1125 18:13:05.439746 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:05 crc kubenswrapper[4926]: I1125 18:13:05.439782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:05 crc kubenswrapper[4926]: I1125 18:13:05.439791 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:07 crc kubenswrapper[4926]: I1125 18:13:07.238061 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:13:07 crc kubenswrapper[4926]: I1125 18:13:07.238343 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:13:07 crc kubenswrapper[4926]: I1125 18:13:07.240461 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:07 crc kubenswrapper[4926]: I1125 18:13:07.240499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:07 crc kubenswrapper[4926]: I1125 18:13:07.240512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.228466 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.230127 4926 trace.go:236] 
Trace[392673648]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 18:12:59.071) (total time: 10158ms): Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[392673648]: ---"Objects listed" error: 10158ms (18:13:09.230) Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[392673648]: [10.158265274s] [10.158265274s] END Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.230184 4926 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.232598 4926 trace.go:236] Trace[1978432199]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 18:12:57.930) (total time: 11301ms): Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[1978432199]: ---"Objects listed" error: 11301ms (18:13:09.232) Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[1978432199]: [11.301792619s] [11.301792619s] END Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.232680 4926 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.232686 4926 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.232766 4926 trace.go:236] Trace[1150919354]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 18:12:54.661) (total time: 14570ms): Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[1150919354]: ---"Objects listed" error: 14570ms (18:13:09.232) Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[1150919354]: [14.570719014s] [14.570719014s] END Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.232789 4926 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.235147 4926 trace.go:236] Trace[1679851658]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 18:12:57.323) (total time: 11911ms): Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[1679851658]: ---"Objects listed" error: 11911ms (18:13:09.235) Nov 25 18:13:09 crc kubenswrapper[4926]: Trace[1679851658]: [11.911239977s] [11.911239977s] END Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.235169 4926 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.236090 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.261637 4926 apiserver.go:52] "Watching apiserver" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.266258 4926 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.266652 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.267088 4926 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.267273 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.267320 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.267527 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.267551 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.267566 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.267615 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.267858 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.267912 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.269467 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.269707 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.270131 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.270132 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.270197 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.270244 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.270248 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.270904 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.272483 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.281150 4926 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.298097 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.307055 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.324230 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.332968 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333024 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333052 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333074 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333093 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333114 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333135 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 18:13:09 
crc kubenswrapper[4926]: I1125 18:13:09.333153 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333365 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333400 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333416 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333431 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333448 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333465 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333603 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333626 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333645 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333665 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333685 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333704 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333719 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333735 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334091 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334184 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334357 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334480 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334491 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334521 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334524 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334503 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334616 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334655 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334821 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334835 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334850 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334861 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.333881 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334929 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335219 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.334994 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335233 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335027 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335264 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335098 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335283 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335222 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335240 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335497 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335532 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335721 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335878 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.335302 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336037 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336058 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336125 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336402 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336466 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336844 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336541 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336298 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336780 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.336951 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337046 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337358 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337406 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337425 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337444 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337460 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337475 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337492 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337510 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337541 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337555 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337572 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337588 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337604 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337622 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337637 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337651 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337668 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337664 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337683 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337767 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337802 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337829 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337892 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337943 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337964 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.337995 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338007 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338025 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338055 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338080 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338191 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338178 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338255 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338208 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.338325 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:13:09.838296773 +0000 UTC m=+20.223810378 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338342 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338351 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338401 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338427 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338454 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338505 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338529 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338559 4926 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338585 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338614 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338639 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338663 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338637 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338699 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338737 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338931 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339171 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339181 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339276 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339290 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339316 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339449 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339486 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339641 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.339993 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340074 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340138 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340274 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340306 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340397 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340513 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340527 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.338689 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340601 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341328 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341363 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341407 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341435 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341460 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341482 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341503 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: 
\"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341524 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341543 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.340336 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341574 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341223 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341273 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341426 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341640 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341547 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341563 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341697 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341773 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341805 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341817 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341832 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341883 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341907 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341932 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341957 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.341983 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342012 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342043 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342067 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: 
\"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342092 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342115 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342134 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342151 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342168 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342186 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342205 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342223 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342243 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342261 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" 
(UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342274 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342284 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342628 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342661 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342689 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342711 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342733 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342755 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342779 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342800 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342823 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342828 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342848 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342873 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342902 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342924 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342944 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342970 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.342991 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343011 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343034 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343053 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343072 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343079 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343093 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343115 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343136 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343468 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343509 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343530 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343535 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343580 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343602 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343626 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343658 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343689 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343715 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343744 4926 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343762 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343807 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343836 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343862 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343889 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343911 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343925 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343941 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343964 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.343984 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344004 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344025 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344037 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344050 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344087 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344108 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344128 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344337 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344357 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344425 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344444 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344461 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344481 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod 
\"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344499 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344517 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344536 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344544 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344556 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344578 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344594 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344613 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344633 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344686 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" 
(UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344705 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344722 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.344751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.345828 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.345982 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346475 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346561 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346528 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346648 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346811 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346850 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.346918 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.347025 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.347291 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.347282 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.347562 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.348050 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.348095 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.348123 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.348415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.348438 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.348910 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.349042 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.349222 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.349524 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.349691 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.349719 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.349918 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.350232 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.350244 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.350272 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.350388 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.350538 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.350650 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351365 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351684 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351766 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351807 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351859 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351925 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.351993 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352040 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352073 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352108 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352140 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352174 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352204 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: 
\"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352235 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352270 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352299 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352328 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352337 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352360 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352434 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352470 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352506 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352563 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352592 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352618 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352670 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352740 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352768 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352846 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.352868 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.353148 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.353165 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.353594 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.353737 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.353787 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354098 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354174 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354436 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354568 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354626 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.354921 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.355326 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.356060 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.356294 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.356711 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.356955 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.357154 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.357529 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.357572 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.357581 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.357815 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.373664 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374428 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374455 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374538 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374668 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374811 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374906 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374949 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.374985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.375143 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.375338 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.375677 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.376072 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.376535 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.376677 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.376782 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.376905 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.377013 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.377105 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.377460 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.377550 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.377772 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.378035 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.378514 4926 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.378770 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379014 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379031 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379217 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379721 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379840 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379899 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.379972 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.380004 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.380051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.380078 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.380905 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.383622 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.386686 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.389488 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.391467 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.383589 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.393716 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.401659 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403272 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.380963 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403487 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403662 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403687 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403702 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403717 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403730 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403745 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403761 4926 reconciler_common.go:293] "Volume 
detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403774 4926 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403786 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403799 4926 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403812 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403824 4926 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403837 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403849 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403861 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403873 4926 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403885 4926 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403898 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403911 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403923 4926 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403935 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403947 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403960 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403974 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.403987 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404000 4926 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404012 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404025 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404038 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404050 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404061 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404073 4926 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404085 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404097 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404109 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404122 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404133 4926 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404145 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404155 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404167 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404179 4926 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404191 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404203 4926 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404214 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404226 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404238 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404251 4926 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404265 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404277 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404290 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404302 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404314 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404326 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404338 4926 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404350 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404363 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404393 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404406 4926 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404417 4926 reconciler_common.go:293] "Volume detached for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404429 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404440 4926 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404452 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404463 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404475 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404487 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404499 4926 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404510 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404526 4926 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404539 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404551 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404564 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404578 4926 reconciler_common.go:293] 
"Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404590 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404602 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404613 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404625 4926 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404636 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404647 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404678 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404689 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404701 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404712 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404724 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404734 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404746 4926 reconciler_common.go:293] "Volume detached for volume 
\"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404758 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404769 4926 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404781 4926 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404793 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404806 4926 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404819 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404830 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404841 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404852 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404865 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404878 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404891 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 
18:13:09.404903 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404917 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404928 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404939 4926 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404954 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404966 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404977 4926 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.404997 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405008 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405020 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405032 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405043 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405055 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath 
\"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405065 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405077 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405088 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405098 4926 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405109 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405120 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405130 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405142 4926 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405155 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405166 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405177 4926 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405188 4926 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405199 4926 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405210 4926 
reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405222 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405235 4926 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405246 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405257 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405270 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405280 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405291 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405302 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405313 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405324 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405336 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.405347 4926 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 
18:13:09.405362 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.402759 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.405562 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.405785 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:09.905759852 +0000 UTC m=+20.291273657 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.382491 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.406477 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.406508 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.407862 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.408027 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.408092 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:09.908072936 +0000 UTC m=+20.293586541 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408243 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408313 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408343 4926 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408352 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408362 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408387 4926 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408396 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408405 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408414 4926 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName:
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408424 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408435 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408443 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408452 4926 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408461 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408469 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408479 4926 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408488 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408497 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408505 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408514 4926 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408525 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408534 4926 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") 
on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408542 4926 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408552 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408561 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408572 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.408739 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.410504 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.414993 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.416940 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.417726 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.422496 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.422582 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.422743 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.422981 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.423031 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.423062 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.423137 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:09.92310257 +0000 UTC m=+20.308616355 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.423138 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.423517 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.423830 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.424978 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.428807 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.429398 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.431784 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.434298 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.434454 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.434596 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.434780 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:09.934750411 +0000 UTC m=+20.320264016 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.435638 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.436275 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.436703 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437069 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437166 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437325 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437434 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437543 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437561 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437578 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437666 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.437785 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.438326 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.440769 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.440896 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.440932 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.446507 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.451284 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.463817 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.466778 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.483832 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.504781 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509623 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509665 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509703 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509713 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509722 4926 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509733 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509741 4926 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509750 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509758 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 
18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509766 4926 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509775 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509783 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509792 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509800 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509808 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509818 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509826 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509834 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509843 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509852 4926 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509860 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509868 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 
18:13:09.509877 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509885 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509893 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509901 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509910 4926 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509918 4926 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509926 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509935 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509942 4926 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509945 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509950 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509974 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.509984 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" 
Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.510065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.514706 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.525774 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.538245 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.543902 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-w62m7"] Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.544228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.546646 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.546764 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.546886 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.553929 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.568790 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.579325 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.586927 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.587777 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.597490 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.604163 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.611353 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/00adb94e-b8cf-4957-92d9-94e141cf6f06-hosts-file\") pod \"node-resolver-w62m7\" (UID: \"00adb94e-b8cf-4957-92d9-94e141cf6f06\") " pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.611436 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f24dz\" (UniqueName: \"kubernetes.io/projected/00adb94e-b8cf-4957-92d9-94e141cf6f06-kube-api-access-f24dz\") pod \"node-resolver-w62m7\" (UID: \"00adb94e-b8cf-4957-92d9-94e141cf6f06\") " pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: W1125 18:13:09.633059 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-68ea72129ca284435b3ee8db72a416bb69485dbda0bf2fe0970cd065a33d4795 WatchSource:0}: Error finding container 68ea72129ca284435b3ee8db72a416bb69485dbda0bf2fe0970cd065a33d4795: Status 404 returned error can't find the container with id 68ea72129ca284435b3ee8db72a416bb69485dbda0bf2fe0970cd065a33d4795 Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.639764 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.666254 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.684914 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.698500 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
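Several of the patched container statuses above carry "exitCode":137 with reason ContainerStatusUnknown. Runtimes report fatal signals as 128 plus the signal number, so 137 decodes to SIGKILL (9) — consistent with containers killed at node shutdown rather than exiting on their own. A one-liner sketch of the decoding:

// exitcode decodes the recurring "exitCode":137 from the status patches:
// values above 128 are 128 + the fatal signal number, so 137 -> signal 9
// (SIGKILL).
package main

import "fmt"

func main() {
	exitCode := 137
	if exitCode > 128 {
		fmt.Printf("exit code %d = fatal signal %d (SIGKILL for 137)\n", exitCode, exitCode-128)
	}
}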
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.711797 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/00adb94e-b8cf-4957-92d9-94e141cf6f06-hosts-file\") pod \"node-resolver-w62m7\" (UID: \"00adb94e-b8cf-4957-92d9-94e141cf6f06\") " pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.711845 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f24dz\" (UniqueName: \"kubernetes.io/projected/00adb94e-b8cf-4957-92d9-94e141cf6f06-kube-api-access-f24dz\") pod \"node-resolver-w62m7\" (UID: \"00adb94e-b8cf-4957-92d9-94e141cf6f06\") " pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.712140 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/00adb94e-b8cf-4957-92d9-94e141cf6f06-hosts-file\") pod \"node-resolver-w62m7\" (UID: \"00adb94e-b8cf-4957-92d9-94e141cf6f06\") " pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.712254 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.734893 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f24dz\" (UniqueName: \"kubernetes.io/projected/00adb94e-b8cf-4957-92d9-94e141cf6f06-kube-api-access-f24dz\") pod \"node-resolver-w62m7\" (UID: \"00adb94e-b8cf-4957-92d9-94e141cf6f06\") " pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.856441 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-w62m7" Nov 25 18:13:09 crc kubenswrapper[4926]: W1125 18:13:09.867768 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00adb94e_b8cf_4957_92d9_94e141cf6f06.slice/crio-6447d90b24c19e5784b6fff73dbcf7c1191e674801c63804a45d53b5ccf454dc WatchSource:0}: Error finding container 6447d90b24c19e5784b6fff73dbcf7c1191e674801c63804a45d53b5ccf454dc: Status 404 returned error can't find the container with id 6447d90b24c19e5784b6fff73dbcf7c1191e674801c63804a45d53b5ccf454dc Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.913129 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.913221 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:09 crc kubenswrapper[4926]: I1125 18:13:09.913340 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.913363 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:13:10.913334632 +0000 UTC m=+21.298848237 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.913455 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.913516 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.913575 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 18:13:10.913558458 +0000 UTC m=+21.299072163 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:09 crc kubenswrapper[4926]: E1125 18:13:09.913595 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:10.913585689 +0000 UTC m=+21.299099514 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.012278 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-skdzg"] Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.012663 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.013674 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.013732 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.013870 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.013907 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.013920 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.013977 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
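These nestedpendingoperations errors each schedule the next attempt with a durationBeforeRetry (1s here): failed mount and unmount operations are retried with a growing delay rather than in a tight loop. A sketch of that pattern — the initial delay matches the 1s shown in the log, but the growth factor and cap are illustrative assumptions, not kubelet's exact constants:

// retrybackoff sketches the durationBeforeRetry pattern visible in the
// nestedpendingoperations errors: each failure pushes the next permitted
// attempt further out, up to some cap.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := time.Second        // matches "durationBeforeRetry 1s" in the log
	maxDelay := 2 * time.Minute // illustrative cap, not kubelet's exact constant
	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("attempt %d: next retry permitted after %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}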
No retries permitted until 2025-11-25 18:13:11.013952215 +0000 UTC m=+21.399465820 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.013887 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.014226 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.014235 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.014448 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:11.014438568 +0000 UTC m=+21.399952173 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.015325 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.015540 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.015670 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.015975 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.016152 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.041715 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.064978 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.081574 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.097219 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.100114 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.103422 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.107805 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.115018 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-rootfs\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.115062 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-proxy-tls\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.115086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-mcd-auth-proxy-config\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.115102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk2ml\" (UniqueName: \"kubernetes.io/projected/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-kube-api-access-tk2ml\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.116882 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.126480 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.136696 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.142986 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.159526 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.169019 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.186079 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.196950 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.209654 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.215516 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-proxy-tls\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.215989 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-mcd-auth-proxy-config\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.216012 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk2ml\" (UniqueName: \"kubernetes.io/projected/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-kube-api-access-tk2ml\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.216041 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-rootfs\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.216080 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-rootfs\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.216547 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-mcd-auth-proxy-config\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.218674 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-proxy-tls\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.223546 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.232327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk2ml\" (UniqueName: \"kubernetes.io/projected/5655ebe9-673e-4e9e-ad75-edf6c92bddb7-kube-api-access-tk2ml\") pod \"machine-config-daemon-skdzg\" (UID: \"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\") " pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.239419 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.249525 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.268150 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.277857 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.286517 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.327296 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.329098 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.329231 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.332413 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.333100 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.334168 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.335582 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.336410 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.337408 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.338023 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.338548 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: W1125 18:13:10.339182 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5655ebe9_673e_4e9e_ad75_edf6c92bddb7.slice/crio-c46e6ebba45256140ea69bcaac30f1a9e55bc62af2ab2d6af1a0c6b838d47c06 WatchSource:0}: Error finding container c46e6ebba45256140ea69bcaac30f1a9e55bc62af2ab2d6af1a0c6b838d47c06: Status 404 returned error can't find the container with id c46e6ebba45256140ea69bcaac30f1a9e55bc62af2ab2d6af1a0c6b838d47c06 Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.339570 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.340141 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.341443 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.342197 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.343214 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.343756 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.344635 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.345219 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.345806 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.346709 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.347289 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.347930 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.348346 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.348864 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.349427 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.350268 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.350942 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.351796 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.352560 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" 
path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.353691 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.355652 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.356337 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.357442 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.358358 4926 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.359000 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.361757 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.362943 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.363528 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.363881 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.365699 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.366863 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.367487 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.368772 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.369639 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.374496 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.375222 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.376409 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.377735 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.378733 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.378854 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.379504 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.380873 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.381783 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.383034 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.383624 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.384586 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.385459 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.386562 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.387843 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.395491 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.408039 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-r9lmm"] Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.408621 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-49qhh"] Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.408868 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.408909 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.410220 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zrwvb"] Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.411054 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.411260 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.413460 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.413624 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.413786 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.413908 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.414755 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.416280 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.417032 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.417314 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.423500 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.423532 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.423676 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.423773 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.423855 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.424175 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.458870 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"68ea72129ca284435b3ee8db72a416bb69485dbda0bf2fe0970cd065a33d4795"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.462431 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.462502 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.462522 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"89ff0e6e41402481f48370e61ec5214c1db87e00b0a6afd4cb6cabdd287dedab"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.465882 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.470217 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.470285 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"50b8498410ac4f4d796160596af9b55566473da443d7920b63cbfedeb2f5c249"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.475674 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-w62m7" event={"ID":"00adb94e-b8cf-4957-92d9-94e141cf6f06","Type":"ContainerStarted","Data":"330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.475753 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-w62m7" event={"ID":"00adb94e-b8cf-4957-92d9-94e141cf6f06","Type":"ContainerStarted","Data":"6447d90b24c19e5784b6fff73dbcf7c1191e674801c63804a45d53b5ccf454dc"} Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.483399 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"c46e6ebba45256140ea69bcaac30f1a9e55bc62af2ab2d6af1a0c6b838d47c06"} Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.498993 4926 kubelet.go:1929] "Failed creating 
a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.500249 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.526886 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-etc-kubernetes\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.526932 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-config\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.526956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.526973 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-kubelet\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.526997 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527019 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-os-release\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527081 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-hostroot\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527101 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-node-log\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527116 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-log-socket\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527140 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-socket-dir-parent\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527162 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-ovn\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527181 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527203 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-slash\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527220 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-ovn-kubernetes\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527239 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-bin\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527259 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-os-release\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527276 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-netns\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527294 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527324 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkbf2\" (UniqueName: \"kubernetes.io/projected/78af77fa-0071-48e9-8b78-bdd92abfb013-kube-api-access-qkbf2\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527344 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-kubelet\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527407 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-cnibin\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527431 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cni-binary-copy\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527452 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-system-cni-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527522 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-k8s-cni-cncf-io\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527702 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-var-lib-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527797 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-system-cni-dir\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527836 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-daemon-config\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527855 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-systemd-units\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.527878 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vzkx\" (UniqueName: \"kubernetes.io/projected/3d19408d-6957-4cfa-8ac3-f286155b4c2d-kube-api-access-2vzkx\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.528912 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-etc-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.528959 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/62905073-17d2-4b78-9921-02a343480b34-ovn-node-metrics-cert\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529032 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-multus-certs\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529059 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-netns\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529083 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-script-lib\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529109 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmzlv\" (UniqueName: \"kubernetes.io/projected/62905073-17d2-4b78-9921-02a343480b34-kube-api-access-xmzlv\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529130 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-cni-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529151 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/78af77fa-0071-48e9-8b78-bdd92abfb013-cni-binary-copy\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " 
pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529398 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-cni-multus\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529423 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-env-overrides\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529444 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-conf-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-systemd\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529556 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cnibin\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529587 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-cni-bin\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.529610 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-netd\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.545639 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.574483 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.621742 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630160 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-system-cni-dir\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630201 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-daemon-config\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630217 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-systemd-units\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630443 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vzkx\" (UniqueName: 
\"kubernetes.io/projected/3d19408d-6957-4cfa-8ac3-f286155b4c2d-kube-api-access-2vzkx\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630461 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-etc-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630476 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/62905073-17d2-4b78-9921-02a343480b34-ovn-node-metrics-cert\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630497 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-multus-certs\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630513 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-netns\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630529 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-script-lib\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630546 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmzlv\" (UniqueName: \"kubernetes.io/projected/62905073-17d2-4b78-9921-02a343480b34-kube-api-access-xmzlv\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630566 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-cni-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630553 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-system-cni-dir\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630588 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-systemd-units\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630665 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-netns\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630638 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-multus-certs\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630684 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-etc-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630585 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/78af77fa-0071-48e9-8b78-bdd92abfb013-cni-binary-copy\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630759 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-cni-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630796 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-cni-multus\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630829 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-env-overrides\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630866 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-cni-multus\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630895 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cnibin\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " 
pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630921 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-cni-bin\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630948 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-conf-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630970 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cnibin\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630980 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-cni-bin\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631001 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-systemd\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631004 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-conf-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.630972 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-systemd\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-netd\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631074 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-etc-kubernetes\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631102 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-config\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631108 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-netd\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631147 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-etc-kubernetes\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631145 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631175 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-kubelet\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631202 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631240 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-os-release\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631263 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-hostroot\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631293 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-socket-dir-parent\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631316 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-ovn\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631340 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-node-log\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631359 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-log-socket\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631404 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631424 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-slash\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631449 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-ovn-kubernetes\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631469 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-bin\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-os-release\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631515 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-netns\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631525 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-socket-dir-parent\") pod \"multus-49qhh\" (UID: 
\"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631528 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631838 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-script-lib\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631563 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631563 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-var-lib-kubelet\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631904 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-os-release\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631590 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-ovn-kubernetes\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631614 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-bin\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631954 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/78af77fa-0071-48e9-8b78-bdd92abfb013-cni-binary-copy\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631617 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-ovn\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc 
kubenswrapper[4926]: I1125 18:13:10.631930 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-os-release\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631644 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-node-log\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631775 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-hostroot\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631804 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-netns\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631539 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631587 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-slash\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.631669 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-log-socket\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632053 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkbf2\" (UniqueName: \"kubernetes.io/projected/78af77fa-0071-48e9-8b78-bdd92abfb013-kube-api-access-qkbf2\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632078 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-kubelet\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632085 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cni-sysctl-allowlist\") pod 
\"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632106 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-cnibin\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632137 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-kubelet\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632138 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cni-binary-copy\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632182 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-system-cni-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632202 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-k8s-cni-cncf-io\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632219 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-var-lib-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632277 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-var-lib-openvswitch\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632341 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-cnibin\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632391 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-host-run-k8s-cni-cncf-io\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc 
kubenswrapper[4926]: I1125 18:13:10.632414 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78af77fa-0071-48e9-8b78-bdd92abfb013-system-cni-dir\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632426 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3d19408d-6957-4cfa-8ac3-f286155b4c2d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632746 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3d19408d-6957-4cfa-8ac3-f286155b4c2d-cni-binary-copy\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.632923 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-config\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.633190 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-env-overrides\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.633557 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/78af77fa-0071-48e9-8b78-bdd92abfb013-multus-daemon-config\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.637024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/62905073-17d2-4b78-9921-02a343480b34-ovn-node-metrics-cert\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.661902 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmzlv\" (UniqueName: \"kubernetes.io/projected/62905073-17d2-4b78-9921-02a343480b34-kube-api-access-xmzlv\") pod \"ovnkube-node-zrwvb\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") " pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.662102 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.664838 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vzkx\" (UniqueName: \"kubernetes.io/projected/3d19408d-6957-4cfa-8ac3-f286155b4c2d-kube-api-access-2vzkx\") pod \"multus-additional-cni-plugins-r9lmm\" (UID: \"3d19408d-6957-4cfa-8ac3-f286155b4c2d\") " pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.671117 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkbf2\" (UniqueName: \"kubernetes.io/projected/78af77fa-0071-48e9-8b78-bdd92abfb013-kube-api-access-qkbf2\") pod \"multus-49qhh\" (UID: \"78af77fa-0071-48e9-8b78-bdd92abfb013\") " pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.696554 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.715387 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.728451 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.742390 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.748477 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-49qhh" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.764895 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.766656 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.773383 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.783302 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: W1125 18:13:10.789049 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod62905073_17d2_4b78_9921_02a343480b34.slice/crio-087fa0f491efee00babbadfed1073fc9e87aa407fb06bc92bf83bb52c6619ca7 WatchSource:0}: Error finding container 087fa0f491efee00babbadfed1073fc9e87aa407fb06bc92bf83bb52c6619ca7: Status 404 returned error can't find the container with id 087fa0f491efee00babbadfed1073fc9e87aa407fb06bc92bf83bb52c6619ca7 Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.799153 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.816099 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.832456 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.849210 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.859952 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.880617 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name
\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-25T18:13:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.934684 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.934915 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:13:12.934873326 +0000 UTC m=+23.320386931 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.935060 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:10 crc kubenswrapper[4926]: I1125 18:13:10.935205 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.935272 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.935346 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.935391 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:12.935351339 +0000 UTC m=+23.320865124 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:10 crc kubenswrapper[4926]: E1125 18:13:10.935416 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:12.935407171 +0000 UTC m=+23.320920776 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.036722 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.036780 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.036934 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.036928 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.036985 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.036951 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.037000 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.037007 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.037062 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:13.037045512 +0000 UTC m=+23.422559117 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.037079 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:13.037073383 +0000 UTC m=+23.422586988 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.328457 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.328537 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.328588 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:11 crc kubenswrapper[4926]: E1125 18:13:11.328710 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.487617 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" exitCode=0 Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.487723 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.487785 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"087fa0f491efee00babbadfed1073fc9e87aa407fb06bc92bf83bb52c6619ca7"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.489199 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d19408d-6957-4cfa-8ac3-f286155b4c2d" containerID="ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242" exitCode=0 Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.489250 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerDied","Data":"ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.489337 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerStarted","Data":"aece6d8db581e1ca14a7f5eb3ae8290b1c386be7a696fd393b2de71916a1711a"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.490891 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerStarted","Data":"14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.490938 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerStarted","Data":"85a73149c4591f15780a6ed51e2af50fa1fb61e03166d56eb3922393271108cb"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.493044 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.493094 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1"} Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.507506 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.542106 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.569240 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.604880 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.627147 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.645949 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.664526 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.683545 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.697929 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.709492 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.725725 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.746944 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.764052 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.777300 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.791224 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.804766 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.820713 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.836405 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc 
kubenswrapper[4926]: I1125 18:13:11.852352 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.864752 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.877607 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.892151 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c6
6371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.908424 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.923984 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.933541 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-hnjr5"] Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.934003 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.936173 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.936670 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.938353 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.938396 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.938444 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 18:13:11 crc kubenswrapper[4926]: I1125 18:13:11.985692 4926 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"m
ountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:11Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.017580 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"
podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.048079 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/55918e3c-e590-4127-95dc-759990298fca-host\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.048149 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55918e3c-e590-4127-95dc-759990298fca-serviceca\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.048294 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnm7r\" (UniqueName: \"kubernetes.io/projected/55918e3c-e590-4127-95dc-759990298fca-kube-api-access-gnm7r\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.066275 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z 
is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.098410 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.149279 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnm7r\" (UniqueName: \"kubernetes.io/projected/55918e3c-e590-4127-95dc-759990298fca-kube-api-access-gnm7r\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.149329 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/55918e3c-e590-4127-95dc-759990298fca-host\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.149360 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55918e3c-e590-4127-95dc-759990298fca-serviceca\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.149486 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/55918e3c-e590-4127-95dc-759990298fca-host\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.150324 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55918e3c-e590-4127-95dc-759990298fca-serviceca\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.152956 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.171769 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnm7r\" (UniqueName: \"kubernetes.io/projected/55918e3c-e590-4127-95dc-759990298fca-kube-api-access-gnm7r\") pod \"node-ca-hnjr5\" (UID: \"55918e3c-e590-4127-95dc-759990298fca\") " pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.201148 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.239359 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.278858 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.280056 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-hnjr5" Nov 25 18:13:12 crc kubenswrapper[4926]: W1125 18:13:12.292816 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55918e3c_e590_4127_95dc_759990298fca.slice/crio-cf85340af028f0393b00d79da207ceef6b40b9ba24107504271f5d4ac081d290 WatchSource:0}: Error finding container cf85340af028f0393b00d79da207ceef6b40b9ba24107504271f5d4ac081d290: Status 404 returned error can't find the container with id cf85340af028f0393b00d79da207ceef6b40b9ba24107504271f5d4ac081d290 Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.319393 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.328631 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:12 crc kubenswrapper[4926]: E1125 18:13:12.328809 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.361272 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.399164 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.441466 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.481110 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.499740 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.499799 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.499816 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" 
event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.499830 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.500973 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-hnjr5" event={"ID":"55918e3c-e590-4127-95dc-759990298fca","Type":"ContainerStarted","Data":"cf85340af028f0393b00d79da207ceef6b40b9ba24107504271f5d4ac081d290"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.504133 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.507133 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d19408d-6957-4cfa-8ac3-f286155b4c2d" containerID="38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91" exitCode=0 Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.507216 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerDied","Data":"38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91"} Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.519005 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.565504 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.599333 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.638557 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.679851 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.718584 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.762313 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.801337 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.839228 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.879195 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.918617 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T
18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.959484 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.959668 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.959724 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:12 crc kubenswrapper[4926]: E1125 18:13:12.959766 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:13:16.959738964 +0000 UTC m=+27.345252579 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:13:12 crc kubenswrapper[4926]: E1125 18:13:12.959792 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:12 crc kubenswrapper[4926]: E1125 18:13:12.959859 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:12 crc kubenswrapper[4926]: E1125 18:13:12.959915 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:16.959904139 +0000 UTC m=+27.345417744 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:12 crc kubenswrapper[4926]: E1125 18:13:12.959937 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:16.959927769 +0000 UTC m=+27.345441374 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:12 crc kubenswrapper[4926]: I1125 18:13:12.961780 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:12Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.022791 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.044927 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.061167 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.061225 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061391 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered 
Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061421 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061433 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061490 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:17.061475258 +0000 UTC m=+27.446988863 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061798 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061816 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061825 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.061871 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:17.061864278 +0000 UTC m=+27.447377883 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.083520 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z 
is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.117228 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.328302 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.328322 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.328446 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:13 crc kubenswrapper[4926]: E1125 18:13:13.328574 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.354655 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.367068 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.368022 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.369159 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.381111 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.392101 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.405808 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T
18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.425899 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.438682 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.447456 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.465096 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4
956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.498235 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.510540 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-hnjr5" event={"ID":"55918e3c-e590-4127-95dc-759990298fca","Type":"ContainerStarted","Data":"38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49"}
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.512672 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d19408d-6957-4cfa-8ac3-f286155b4c2d" containerID="0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc" exitCode=0
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.512737 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerDied","Data":"0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc"}
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.516269 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"}
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.516313 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"}
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.539792 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.581948 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.623783 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.659904 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.700098 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.745879 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.779006 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.819892 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.858132 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.898459 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.939583 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:13 crc kubenswrapper[4926]: I1125 18:13:13.978854 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:13Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.017981 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.057899 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.098516 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T
18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.141819 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.177982 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.217780 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.263760 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4
956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.296819 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.329019 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:14 crc kubenswrapper[4926]: E1125 18:13:14.329143 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.521184 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d19408d-6957-4cfa-8ac3-f286155b4c2d" containerID="b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a" exitCode=0 Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.521246 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerDied","Data":"b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a"} Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.531538 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.554018 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.567743 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.579807 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 
25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.615697 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.637888 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.658990 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.678619 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.693444 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.712650 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.742763 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c6
6371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.779042 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.820316 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.858536 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:14 crc kubenswrapper[4926]: I1125 18:13:14.903456 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:14Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.328851 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.328908 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.328989 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.329138 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.529877 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d19408d-6957-4cfa-8ac3-f286155b4c2d" containerID="e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6" exitCode=0 Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.529960 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerDied","Data":"e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.536494 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.556187 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.578473 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.593170 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.611071 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4
956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.625166 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.636534 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.638004 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.639874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.639926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.639940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.640069 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.647745 4926 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.648278 4926 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.650088 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.650134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.650149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.650172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.650188 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.651468 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.664169 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.665334 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.668469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.668524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.668537 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.668558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.668572 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.677310 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.679144 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.682878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.682912 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.682924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.682942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.682954 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.689031 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.694552 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.698904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.699016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.699079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.699183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.699302 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.708650 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.712726 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.716152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.716183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.716192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.716206 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.716215 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.728716 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: E1125 18:13:15.729052 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.730025 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150
519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59
405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.731299 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.731358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.731422 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.731455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.731478 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.743264 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.755508 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.770710 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:15Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.834402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.834442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.834453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.834469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.834478 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.937203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.937253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.937264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.937285 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:15 crc kubenswrapper[4926]: I1125 18:13:15.937297 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:15Z","lastTransitionTime":"2025-11-25T18:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.039822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.039861 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.039870 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.039887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.039902 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.141877 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.141912 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.141921 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.141934 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.141944 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.244613 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.244832 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.244909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.244989 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.245064 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.328930 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:16 crc kubenswrapper[4926]: E1125 18:13:16.329309 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.347503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.347552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.347568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.347591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.347609 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.450282 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.450330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.450342 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.450560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.450574 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.547552 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d19408d-6957-4cfa-8ac3-f286155b4c2d" containerID="78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d" exitCode=0 Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.547599 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerDied","Data":"78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.552788 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.552859 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.552873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.552906 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.552920 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.561989 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.576977 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.591481 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.603095 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.611952 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.629085 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4
956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.647639 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.655450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.655494 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.655503 4926 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.655524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.655534 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.666626 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.678215 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.694273 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.710154 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.722392 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.735547 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.750812 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.759998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.760056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.760070 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.760089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.760102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.772569 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:16Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.862823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.862873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.862885 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.862904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.862917 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.965495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.965966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.965977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.965993 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.966003 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:16Z","lastTransitionTime":"2025-11-25T18:13:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.997300 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:16 crc kubenswrapper[4926]: E1125 18:13:16.997519 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:13:24.997483919 +0000 UTC m=+35.382997524 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.997609 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:16 crc kubenswrapper[4926]: I1125 18:13:16.997666 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:16 crc kubenswrapper[4926]: E1125 18:13:16.997809 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:16 crc kubenswrapper[4926]: E1125 18:13:16.997812 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:16 crc kubenswrapper[4926]: E1125 18:13:16.997855 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:24.997847129 +0000 UTC m=+35.383360734 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:16 crc kubenswrapper[4926]: E1125 18:13:16.997982 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:24.997958132 +0000 UTC m=+35.383471757 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.069229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.069300 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.069311 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.069326 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.069336 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.098827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.098898 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099015 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099032 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099045 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099090 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-25 18:13:25.099077289 +0000 UTC m=+35.484590894 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099306 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099354 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099387 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.099452 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:25.099436929 +0000 UTC m=+35.484950534 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.171887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.171935 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.171944 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.171960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.171970 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.244970 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.261032 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.274240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.274281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.274293 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.274311 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.274326 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.290364 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.307240 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.323764 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.328427 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.328497 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.328526 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 18:13:17 crc kubenswrapper[4926]: E1125 18:13:17.328635 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.341025 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.361020 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.376966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.377028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.377042 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.377071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.377084 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.383206 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.402915 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.418967 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.434498 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.453046 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.467491 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.480229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.480311 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.480333 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.480364 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.480413 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.485925 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.498813 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.530823 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.558823 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" event={"ID":"3d19408d-6957-4cfa-8ac3-f286155b4c2d","Type":"ContainerStarted","Data":"746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.564758 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.565142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.580456 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-
11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.583407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.583467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.583491 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.583521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.583544 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.598079 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\
\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.606152 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.618105 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.644323 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.660826 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.679702 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 
25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.690490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.690602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.690623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.690650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.690664 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.704219 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.726307 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.745063 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.764864 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.783029 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c6
6371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.793678 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.793750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.793765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.793787 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.793803 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.806921 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.827963 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.838346 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.854976 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z 
is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.866239 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.880568 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.893406 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.896049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.896091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.896104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.896121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.896132 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.924310 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.945400 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.961884 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.980101 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-2
5T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39
c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.990901 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.998930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.998957 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.998968 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.998984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:17 crc kubenswrapper[4926]: I1125 18:13:17.998994 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:17Z","lastTransitionTime":"2025-11-25T18:13:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.000881 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:17Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.012007 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.023779 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.035324 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.043897 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.059409 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",
\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name
\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.070819 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.101706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.101743 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.101752 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.101765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.101774 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.203942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.203980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.203990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.204006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.204015 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.307221 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.307277 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.307294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.307315 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.307330 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.328735 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:18 crc kubenswrapper[4926]: E1125 18:13:18.328896 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.409868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.409914 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.409925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.409942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.409952 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.518246 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.518292 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.518304 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.518321 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.518337 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.567639 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.568117 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.590141 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.606145 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.618989 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.620396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.620418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.620427 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.620441 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.620450 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.629333 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.647065 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.659469 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.671090 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.684757 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.697048 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.709113 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.722366 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.722424 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.722434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.722449 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.722459 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.723717 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.738104 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.761683 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc
2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.775252 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.786457 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.799081 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"res
tartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:18Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.824672 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.824704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.824714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.824729 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.824737 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.926757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.926796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.926808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.926821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:18 crc kubenswrapper[4926]: I1125 18:13:18.926831 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:18Z","lastTransitionTime":"2025-11-25T18:13:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.029085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.029134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.029146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.029164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.029180 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.130852 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.130890 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.130907 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.130923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.130935 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.236017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.236060 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.236072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.236089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.236100 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.328492 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:19 crc kubenswrapper[4926]: E1125 18:13:19.328604 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.328493 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:19 crc kubenswrapper[4926]: E1125 18:13:19.328662 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.339169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.339218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.339231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.339246 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.339256 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.441802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.441834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.441844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.441858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.441867 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.545056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.545117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.545129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.545147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.545173 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.570323 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.647812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.647850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.647861 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.647879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.647891 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.750256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.750300 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.750308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.750330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.750343 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.854144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.854184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.854193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.854207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.854216 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.958051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.958352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.958360 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.958388 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:19 crc kubenswrapper[4926]: I1125 18:13:19.958429 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:19Z","lastTransitionTime":"2025-11-25T18:13:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.061929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.061964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.061972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.061985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.061994 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.164468 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.164692 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.164704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.164720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.164732 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.267958 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.267994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.268003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.268018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.268027 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.328550 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:20 crc kubenswrapper[4926]: E1125 18:13:20.328771 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.348785 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4571d5995c40ff394e467429c41d5e1350d47
465da2da4181c4d7871b4ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.359200 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.370460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.370563 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.370581 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.370605 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.370624 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.377296 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.391954 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.401939 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.419055 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.434546 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.453690 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.473720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.473874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.473894 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.473922 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.473946 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.474262 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:
13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.496870 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.516209 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.528678 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.544965 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.561313 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.577082 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.577996 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.578054 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.578066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.578082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.578092 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.580745 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/0.log" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.586285 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6" exitCode=1 Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.586352 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.587175 4926 scope.go:117] "RemoveContainer" containerID="74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.606683 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.627848 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.645752 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.666913 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c6
6371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.681654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.681718 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.681736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.681767 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.681795 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.689155 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.707432 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.721146 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.754930 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/o
vn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:20Z\\\",\\\"message\\\":\\\"] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.091945 6222 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 18:13:20.092140 6222 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.092510 6222 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.092555 6222 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.092587 6222 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 18:13:20.092573 6222 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:20.092636 6222 factory.go:656] Stopping watch factory\\\\nI1125 18:13:20.092641 6222 
reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 18:13:20.092679 6222 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.771765 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.784819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.784855 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.784865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.784880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.784893 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.798137 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.815100 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.828086 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.840354 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.858188 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.872088 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.887138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.887184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.887198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.887217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.887229 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.990167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.990207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.990221 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.990239 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:20 crc kubenswrapper[4926]: I1125 18:13:20.990251 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:20Z","lastTransitionTime":"2025-11-25T18:13:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.092431 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.092461 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.092472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.092489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.092500 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.194952 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.194988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.195004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.195018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.195027 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.297539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.297585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.297596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.297615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.297629 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.328775 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.328807 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:21 crc kubenswrapper[4926]: E1125 18:13:21.328897 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:21 crc kubenswrapper[4926]: E1125 18:13:21.328976 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.399591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.399628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.399639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.399656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.399669 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.502425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.502466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.502476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.502489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.502499 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.590725 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/1.log" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.591440 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/0.log" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.594108 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70" exitCode=1 Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.594136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.594202 4926 scope.go:117] "RemoveContainer" containerID="74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.594872 4926 scope.go:117] "RemoveContainer" containerID="f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70" Nov 25 18:13:21 crc kubenswrapper[4926]: E1125 18:13:21.595048 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.605095 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.605136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.605147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.605162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.605172 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.611677 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.626620 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.638864 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.650432 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.665659 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.681397 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.698912 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e3
3e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.707496 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.707531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.707543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 
18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.707562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.707577 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.710287 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.723850 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84fed
decdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.738318 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.751488 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.762328 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.779978 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74e4571d5995c40ff394e467429c41d5e1350d47465da2da4181c4d7871b4ea6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:20Z\\\",\\\"message\\\":\\\"] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.091945 6222 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 18:13:20.092140 6222 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.092510 6222 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.092555 6222 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:20.092587 6222 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 18:13:20.092573 6222 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:20.092636 6222 factory.go:656] Stopping watch factory\\\\nI1125 18:13:20.092641 6222 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 18:13:20.092679 6222 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 
18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.794107 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4b
a8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.808446 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:21Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.809644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.809680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.809700 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.809718 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.809729 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.911926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.911977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.911988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.912003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:21 crc kubenswrapper[4926]: I1125 18:13:21.912014 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:21Z","lastTransitionTime":"2025-11-25T18:13:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.014550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.014588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.014598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.014615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.014624 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.116896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.116940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.116948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.116963 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.116973 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.219728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.219763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.219774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.219789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.219802 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.322009 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.322050 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.322063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.322079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.322090 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.328590 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:22 crc kubenswrapper[4926]: E1125 18:13:22.328722 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.423872 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.423908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.423918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.423932 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.423942 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.526580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.526623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.526633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.526823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.526835 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.599211 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/1.log" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.602319 4926 scope.go:117] "RemoveContainer" containerID="f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70" Nov 25 18:13:22 crc kubenswrapper[4926]: E1125 18:13:22.602530 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.617872 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.629067 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s"] Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.630453 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.633490 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.633581 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.633824 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.634119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.634167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.634195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.634223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 
18:13:22.634242 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.646041 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.657092 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.674916 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.685281 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.702394 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"co
ntainerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c882840
9b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.714318 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.726159 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.737143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.737173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.737184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.737200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.737223 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.738773 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.751170 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.760868 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d78292c0-9831-49d7-a282-63d27069e6f9-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.760967 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d78292c0-9831-49d7-a282-63d27069e6f9-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.761080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dptzn\" (UniqueName: \"kubernetes.io/projected/d78292c0-9831-49d7-a282-63d27069e6f9-kube-api-access-dptzn\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.761228 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d78292c0-9831-49d7-a282-63d27069e6f9-env-overrides\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.766346 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.781641 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.793793 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.807158 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.824803 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"res
tartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.840798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.840865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.840884 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.840915 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.840938 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.844494 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.859347 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.862778 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d78292c0-9831-49d7-a282-63d27069e6f9-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.862820 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d78292c0-9831-49d7-a282-63d27069e6f9-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.862845 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dptzn\" (UniqueName: \"kubernetes.io/projected/d78292c0-9831-49d7-a282-63d27069e6f9-kube-api-access-dptzn\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.862883 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d78292c0-9831-49d7-a282-63d27069e6f9-env-overrides\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.863449 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d78292c0-9831-49d7-a282-63d27069e6f9-env-overrides\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.863708 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d78292c0-9831-49d7-a282-63d27069e6f9-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.870856 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d78292c0-9831-49d7-a282-63d27069e6f9-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.874390 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.877941 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dptzn\" (UniqueName: \"kubernetes.io/projected/d78292c0-9831-49d7-a282-63d27069e6f9-kube-api-access-dptzn\") pod \"ovnkube-control-plane-749d76644c-b7k6s\" (UID: \"d78292c0-9831-49d7-a282-63d27069e6f9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.888602 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.901308 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.913226 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.924820 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.943413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.943679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.943746 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.943813 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.943871 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:22Z","lastTransitionTime":"2025-11-25T18:13:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.945105 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.952800 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.957621 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: W1125 18:13:22.970235 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd78292c0_9831_49d7_a282_63d27069e6f9.slice/crio-0889ffa1bcdfe2985317bcde8fbc985ce13c22816a58a2a232c43e85177f4f04 WatchSource:0}: Error finding container 0889ffa1bcdfe2985317bcde8fbc985ce13c22816a58a2a232c43e85177f4f04: Status 404 returned error can't find the container with id 0889ffa1bcdfe2985317bcde8fbc985ce13c22816a58a2a232c43e85177f4f04 Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.972635 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:22 crc kubenswrapper[4926]: I1125 18:13:22.993055 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T18:13:22Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.017088 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-2
5T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39
c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.030269 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.042348 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.046451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.046484 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.046496 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.046513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.046526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.056763 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.149362 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.149405 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.149413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.149427 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.149436 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.251328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.251393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.251405 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.251425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.251438 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.328228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.328316 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:23 crc kubenswrapper[4926]: E1125 18:13:23.328418 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:23 crc kubenswrapper[4926]: E1125 18:13:23.328480 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.354120 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.354171 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.354184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.354203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.354215 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.456612 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.456654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.456663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.456679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.456689 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.559487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.559527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.559537 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.559553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.559565 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.608136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" event={"ID":"d78292c0-9831-49d7-a282-63d27069e6f9","Type":"ContainerStarted","Data":"638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.608242 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" event={"ID":"d78292c0-9831-49d7-a282-63d27069e6f9","Type":"ContainerStarted","Data":"bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.608272 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" event={"ID":"d78292c0-9831-49d7-a282-63d27069e6f9","Type":"ContainerStarted","Data":"0889ffa1bcdfe2985317bcde8fbc985ce13c22816a58a2a232c43e85177f4f04"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.626915 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.646650 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.659913 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.662620 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.662667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.662680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.662702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.662718 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.681748 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.693575 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.707167 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.719976 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.740947 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.754500 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.765132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.765172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.765185 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.765202 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.765216 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.768639 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.804189 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"container
ID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1e
d3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.817599 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.828452 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 
18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.843070 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.856029 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.866207 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:23Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.867771 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.867800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.867810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.867825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.867836 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.970096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.970134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.970144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.970160 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:23 crc kubenswrapper[4926]: I1125 18:13:23.970170 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:23Z","lastTransitionTime":"2025-11-25T18:13:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.072774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.072837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.072850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.072898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.072907 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.112704 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-2mwzk"] Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.113696 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: E1125 18:13:24.113835 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.131710 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.146612 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.161929 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.175645 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.175716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.175740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.175769 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.175793 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.177360 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.191812 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.208646 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.232386 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.248705 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.267449 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.273569 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.273665 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dsbl\" (UniqueName: \"kubernetes.io/projected/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-kube-api-access-6dsbl\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.278559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.278600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.278613 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.278633 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.278648 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.285474 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.299582 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.324142 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.328393 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:24 crc kubenswrapper[4926]: E1125 18:13:24.328528 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.344828 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.367303 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.375113 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: E1125 18:13:24.375351 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:24 crc kubenswrapper[4926]: E1125 18:13:24.375533 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:24.875503897 +0000 UTC m=+35.261017512 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.375465 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dsbl\" (UniqueName: \"kubernetes.io/projected/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-kube-api-access-6dsbl\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.380686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.380853 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.380971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.381073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.381156 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.391050 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.400522 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6dsbl\" (UniqueName: \"kubernetes.io/projected/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-kube-api-access-6dsbl\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.423152 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\
\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.434243 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:24Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.483176 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.483244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.483253 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.483273 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.483283 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.586903 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.586948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.586960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.586976 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.586984 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.690253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.690291 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.690300 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.690318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.690328 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.792972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.793014 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.793024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.793043 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.793054 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.881004 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:24 crc kubenswrapper[4926]: E1125 18:13:24.881211 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:24 crc kubenswrapper[4926]: E1125 18:13:24.881308 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:25.881286216 +0000 UTC m=+36.266799921 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.895032 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.895064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.895073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.895087 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.895096 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.997140 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.997182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.997195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.997217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:24 crc kubenswrapper[4926]: I1125 18:13:24.997231 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:24Z","lastTransitionTime":"2025-11-25T18:13:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.083189 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.083434 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.083465 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:13:41.083432768 +0000 UTC m=+51.468946383 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.083546 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.083554 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.083619 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:41.083599072 +0000 UTC m=+51.469112777 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.083743 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.083834 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:41.083807819 +0000 UTC m=+51.469321464 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.099617 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.099658 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.099670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.099686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.099697 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.184929 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.184997 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185114 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185132 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185143 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185202 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:41.185184183 +0000 UTC m=+51.570697788 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185219 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185252 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185270 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.185321 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:41.185304516 +0000 UTC m=+51.570818151 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.201817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.201870 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.201887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.201914 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.201931 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.304680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.304731 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.304740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.304756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.304765 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.328793 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.328846 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.328838 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.328941 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.329038 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.329253 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.406903 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.406965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.406987 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.407017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.407039 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.509248 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.509279 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.509289 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.509303 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.509313 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.613264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.613331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.613354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.613428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.613451 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.715597 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.715650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.715675 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.715698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.715713 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.817917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.818029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.818052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.818075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.818089 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.895869 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.896035 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: E1125 18:13:25.896142 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:27.896117877 +0000 UTC m=+38.281631552 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.920707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.920744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.920755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.920770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:25 crc kubenswrapper[4926]: I1125 18:13:25.920780 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:25Z","lastTransitionTime":"2025-11-25T18:13:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.023064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.023121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.023129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.023141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.023151 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.125325 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.125351 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.125359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.125392 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.125401 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.126083 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.126105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.126113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.126122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.126131 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: E1125 18:13:26.140360 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:26Z is after 
2025-08-24T17:21:41Z"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.143751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.143780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.143792 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.143805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.143814 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.161394 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.161420 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.161428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.161444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.161454 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.177107 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.177138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.177149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.177164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.177175 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.197873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.197900 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.197910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.197925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.197935 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:26 crc kubenswrapper[4926]: E1125 18:13:26.209151 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:26Z is after 
2025-08-24T17:21:41Z" Nov 25 18:13:26 crc kubenswrapper[4926]: E1125 18:13:26.209258 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.227440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.227470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.227478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.227490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.227499 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.328980 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:26 crc kubenswrapper[4926]: E1125 18:13:26.329190 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.330598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.330659 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.330683 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.330720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.330743 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.432544 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.432579 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.432587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.432599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.432608 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.535010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.535044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.535054 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.535067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.535076 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.637213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.637266 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.637280 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.637301 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.637316 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.739732 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.739793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.739815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.739843 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.739866 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.842211 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.842250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.842259 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.842275 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.842283 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.944758 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.944815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.944830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.944849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:26 crc kubenswrapper[4926]: I1125 18:13:26.944864 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:26Z","lastTransitionTime":"2025-11-25T18:13:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.047568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.047615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.047628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.047644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.047656 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.149812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.149857 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.149872 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.149890 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.149905 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.252015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.252072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.252083 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.252100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.252109 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.329163 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.329230 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.329260 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:27 crc kubenswrapper[4926]: E1125 18:13:27.329367 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:27 crc kubenswrapper[4926]: E1125 18:13:27.329476 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:27 crc kubenswrapper[4926]: E1125 18:13:27.329571 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.354885 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.354933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.354948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.354968 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.354983 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.457407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.457466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.457478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.457492 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.457502 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.559923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.559975 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.559984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.559999 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.560010 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.661956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.662006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.662016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.662030 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.662039 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.764073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.764114 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.764125 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.764144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.764155 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.866334 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.866681 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.866702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.866720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.866731 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.916848 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:27 crc kubenswrapper[4926]: E1125 18:13:27.916991 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:27 crc kubenswrapper[4926]: E1125 18:13:27.917043 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:31.917030216 +0000 UTC m=+42.302543821 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.969389 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.969444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.969460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.969481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:27 crc kubenswrapper[4926]: I1125 18:13:27.969497 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:27Z","lastTransitionTime":"2025-11-25T18:13:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.071470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.071513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.071529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.071545 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.071555 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.174282 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.174320 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.174331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.174349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.174366 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.277276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.277337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.277354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.277425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.277443 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.328975 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:28 crc kubenswrapper[4926]: E1125 18:13:28.329104 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.379356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.379419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.379429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.379445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.379454 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.481924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.481950 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.481959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.481972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.481981 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.584747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.585026 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.585129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.585261 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.585527 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.689876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.690227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.690363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.690590 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.690740 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.794995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.795584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.795666 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.795751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.795848 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.899934 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.899971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.899980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.899998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:28 crc kubenswrapper[4926]: I1125 18:13:28.900015 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:28Z","lastTransitionTime":"2025-11-25T18:13:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.003793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.004024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.004175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.004254 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.004326 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.107049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.107092 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.107105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.107123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.107134 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.210032 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.210127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.210148 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.210173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.210222 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.313748 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.313791 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.313802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.313820 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.313830 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.329058 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.329133 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.329168 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:29 crc kubenswrapper[4926]: E1125 18:13:29.329243 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:29 crc kubenswrapper[4926]: E1125 18:13:29.329456 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:29 crc kubenswrapper[4926]: E1125 18:13:29.329605 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.417344 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.417451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.417477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.417506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.417527 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.519917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.519946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.519954 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.519966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.519974 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.621899 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.621943 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.621960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.621980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.621997 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.724162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.724248 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.724259 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.724276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.724305 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.826467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.826522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.826535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.826554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.826568 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.928959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.928993 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.929002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.929015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:29 crc kubenswrapper[4926]: I1125 18:13:29.929024 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:29Z","lastTransitionTime":"2025-11-25T18:13:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.030426 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.030465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.030495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.030512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.030522 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.133024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.133093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.133104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.133141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.133153 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.235941 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.235997 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.236006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.236019 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.236027 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.328478 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:30 crc kubenswrapper[4926]: E1125 18:13:30.328688 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.337482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.337546 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.337569 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.337598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.337620 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.343395 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.357084 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.371620 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.403580 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-2
5T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39
c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.419502 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.436709 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.439459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.439528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.439548 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.439572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.439590 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.449616 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.466865 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.480243 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.493970 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.504491 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.514018 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.529544 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.541675 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.541724 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.541740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.541763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.541778 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.542799 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.554760 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.565420 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.593826 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/o
vn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 
18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:30Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.644463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.644511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.644529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.644549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.644566 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.746535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.746586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.746604 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.746623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.746637 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.849322 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.849398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.849413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.849452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.849465 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.951609 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.951660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.951675 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.951699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:30 crc kubenswrapper[4926]: I1125 18:13:30.951714 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:30Z","lastTransitionTime":"2025-11-25T18:13:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.053895 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.053938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.053948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.053965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.053975 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.156727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.156764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.156789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.156804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.156812 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.258924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.258977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.258995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.259018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.259035 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.328701 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.328807 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.328838 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:31 crc kubenswrapper[4926]: E1125 18:13:31.328987 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:31 crc kubenswrapper[4926]: E1125 18:13:31.329183 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:31 crc kubenswrapper[4926]: E1125 18:13:31.329397 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.361137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.361187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.361196 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.361213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.361223 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.464750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.464794 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.464805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.464822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.464833 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.523423 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.524291 4926 scope.go:117] "RemoveContainer" containerID="f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.566971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.567031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.567044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.567058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.567068 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.635794 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/1.log" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.637934 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.669256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.669310 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.669324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.669348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.669362 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.771819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.771862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.771874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.771888 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.771898 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.876632 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.876674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.876683 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.876698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.876707 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.957743 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:31 crc kubenswrapper[4926]: E1125 18:13:31.957897 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:31 crc kubenswrapper[4926]: E1125 18:13:31.957951 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:39.957937067 +0000 UTC m=+50.343450672 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.978601 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.978641 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.978650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.978664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:31 crc kubenswrapper[4926]: I1125 18:13:31.978675 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:31Z","lastTransitionTime":"2025-11-25T18:13:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.081279 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.081318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.081328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.081345 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.081355 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.184269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.184359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.184409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.184436 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.184477 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.286752 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.287411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.287430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.287453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.287467 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.328685 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:13:32 crc kubenswrapper[4926]: E1125 18:13:32.328867 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.390132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.390180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.390197 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.390215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.390232 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.493151 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.493226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.493243 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.493265 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.493283 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.595493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.595523 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.595533 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.595547 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.595555 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.642089 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/2.log"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.642967 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/1.log"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.645862 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84" exitCode=1
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.645897 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84"}
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.645929 4926 scope.go:117] "RemoveContainer" containerID="f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.647750 4926 scope.go:117] "RemoveContainer" containerID="217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84"
Nov 25 18:13:32 crc kubenswrapper[4926]: E1125 18:13:32.648258 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34"
Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.666621 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.684230 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.697560 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.698180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.698213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.698227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.698243 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.698257 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.709293 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.717815 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc 
kubenswrapper[4926]: I1125 18:13:32.730654 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.743154 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.753728 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.764347 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.789047 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5c9b5f93d7e8be2c9a0232eaf52126d8854896ca7f768708c75376d5d2fdf70\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:21Z\\\",\\\"message\\\":\\\"rvice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:21.456350 6349 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 18:13:21.456397 6349 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 18:13:21.456403 6349 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 18:13:21.456422 6349 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:21.456430 6349 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 18:13:21.456439 6349 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 18:13:21.456446 6349 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:21.456456 6349 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:21.456457 6349 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 18:13:21.456459 6349 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 18:13:21.456467 6349 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 18:13:21.456475 6349 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 18:13:21.456526 6349 factory.go:656] Stopping watch factory\\\\nI1125 18:13:21.456541 6349 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:21.456546 6349 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 
18\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 
18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.797971 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.801091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.801132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.801155 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.801183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.801193 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.818145 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.836211 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.848103 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 
25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.859689 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.871545 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.885714 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T18:13:32Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.903838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.903880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.903891 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.903906 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:32 crc kubenswrapper[4926]: I1125 18:13:32.903916 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:32Z","lastTransitionTime":"2025-11-25T18:13:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.006932 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.006974 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.006985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.007001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.007012 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.108825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.108858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.108866 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.108879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.108887 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.211747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.211785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.211797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.211813 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.211824 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.314695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.314776 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.314786 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.314802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.314817 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.328319 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.328442 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.328458 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:33 crc kubenswrapper[4926]: E1125 18:13:33.328675 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:33 crc kubenswrapper[4926]: E1125 18:13:33.328458 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:33 crc kubenswrapper[4926]: E1125 18:13:33.328809 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.416817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.416874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.416882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.416896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.416906 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.519096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.519136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.519147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.519164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.519173 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.620666 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.620693 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.620702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.620715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.620724 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.651322 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/2.log" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.655168 4926 scope.go:117] "RemoveContainer" containerID="217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84" Nov 25 18:13:33 crc kubenswrapper[4926]: E1125 18:13:33.655451 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.665789 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.680760 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\
\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.695227 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\
\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.720553 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646f
b68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.723241 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.723289 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.723305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.723327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.723346 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.735517 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.749619 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.761505 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.774137 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.787460 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.797351 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.807943 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.819737 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.825300 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.825441 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.825529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.825634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.825713 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.834556 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.848225 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.858572 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.868857 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.889023 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:33Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.927313 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.927351 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.927363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.927418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:33 crc kubenswrapper[4926]: I1125 18:13:33.927430 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:33Z","lastTransitionTime":"2025-11-25T18:13:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.029141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.029182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.029193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.029208 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.029226 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.131237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.131282 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.131295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.131314 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.131327 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.234487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.234596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.234621 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.234652 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.234676 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.328368 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:34 crc kubenswrapper[4926]: E1125 18:13:34.328681 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.337400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.337466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.337482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.337511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.337530 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.440184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.440248 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.440262 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.440286 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.440303 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.542834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.542877 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.542886 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.542904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.542915 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.646588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.647186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.647421 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.647725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.647937 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.751552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.751613 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.751625 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.751657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.751670 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.854326 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.854400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.854419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.854442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.854492 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.957873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.957933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.957949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.958008 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:34 crc kubenswrapper[4926]: I1125 18:13:34.958027 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:34Z","lastTransitionTime":"2025-11-25T18:13:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.062439 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.062569 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.062587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.062617 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.062642 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.165677 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.165711 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.165720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.165739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.165750 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.268724 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.268784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.268793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.268807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.268848 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.328831 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.328865 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.328895 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:35 crc kubenswrapper[4926]: E1125 18:13:35.328957 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:35 crc kubenswrapper[4926]: E1125 18:13:35.329036 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:35 crc kubenswrapper[4926]: E1125 18:13:35.329101 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.370776 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.370808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.370817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.370830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.370838 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.472581 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.472606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.472614 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.472626 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.472635 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.575397 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.575433 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.575444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.575460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.575470 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.677180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.677259 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.677279 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.677320 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.677348 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.779554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.779599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.779608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.779623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.779633 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.882544 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.882593 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.882609 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.882635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.882651 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.985402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.985450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.985462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.985477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:35 crc kubenswrapper[4926]: I1125 18:13:35.985486 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:35Z","lastTransitionTime":"2025-11-25T18:13:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.088267 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.088411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.088431 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.088458 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.088484 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.191237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.191281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.191291 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.191305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.191315 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.293254 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.293289 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.293297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.293313 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.293343 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.328324 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.328480 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.389011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.389047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.389065 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.389082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.389093 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.401010 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:36Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.404495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.404532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.404546 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.404562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.404572 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.415560 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:36Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.418961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.418981 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.418989 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.419001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.419009 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.429514 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:36Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.432603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.432642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.432653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.432669 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.432681 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.442862 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:36Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.446048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.446089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.446098 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.446115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.446126 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.458816 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:36Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:36 crc kubenswrapper[4926]: E1125 18:13:36.458925 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.463703 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.464319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.464361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.464717 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.464954 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.568066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.568115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.568125 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.568138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.568147 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.670817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.670890 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.670909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.670934 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:36 crc kubenswrapper[4926]: I1125 18:13:36.670954 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:36Z","lastTransitionTime":"2025-11-25T18:13:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.293352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.293771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.294119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.294421 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.294656 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:37Z","lastTransitionTime":"2025-11-25T18:13:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.328659 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.328749 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.328784 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:37 crc kubenswrapper[4926]: E1125 18:13:37.329337 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:37 crc kubenswrapper[4926]: E1125 18:13:37.329097 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:37 crc kubenswrapper[4926]: E1125 18:13:37.329442 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.397604 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.397951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.398080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.398215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:37 crc kubenswrapper[4926]: I1125 18:13:37.398353 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:37Z","lastTransitionTime":"2025-11-25T18:13:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.325948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.326059 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.326077 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.326115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.326142 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:38Z","lastTransitionTime":"2025-11-25T18:13:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.329345 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:38 crc kubenswrapper[4926]: E1125 18:13:38.329614 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.429924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.430054 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.430080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.430112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.430134 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:38Z","lastTransitionTime":"2025-11-25T18:13:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.533201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.533283 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.533296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.533316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:38 crc kubenswrapper[4926]: I1125 18:13:38.533348 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:38Z","lastTransitionTime":"2025-11-25T18:13:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.154541 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.154592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.154607 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.154628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.154641 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.257076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.257127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.257139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.257156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.257166 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.329025 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.329113 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:39 crc kubenswrapper[4926]: E1125 18:13:39.329206 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:39 crc kubenswrapper[4926]: E1125 18:13:39.329408 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.329054 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:39 crc kubenswrapper[4926]: E1125 18:13:39.329532 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.359405 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.359456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.359468 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.359486 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.359500 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.461846 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.461901 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.461911 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.461927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.461938 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.769659 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.769714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.769723 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.769738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.769750 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.872118 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.872197 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.872206 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.872220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.872230 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.974682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.974742 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.974751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.974765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:39 crc kubenswrapper[4926]: I1125 18:13:39.974774 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:39Z","lastTransitionTime":"2025-11-25T18:13:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.040578 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:40 crc kubenswrapper[4926]: E1125 18:13:40.040716 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:40 crc kubenswrapper[4926]: E1125 18:13:40.040762 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:13:56.040749009 +0000 UTC m=+66.426262614 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.077487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.077516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.077524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.077572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.077584 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.180172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.180227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.180239 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.180252 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.180261 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.282727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.282795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.282812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.282836 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.282858 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.328246 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:40 crc kubenswrapper[4926]: E1125 18:13:40.328350 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.338152 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.352435 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.370877 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.384493 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.386335 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.386416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.386453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.386472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.386483 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.405283 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.429076 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.439102 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.448769 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.463340 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.473803 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.482873 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.488193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.488230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.488244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.488263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.488275 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.491554 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.507005 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6
782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 
18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.519416 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.535933 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.548989 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.560198 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:40Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.590341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.590365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.590386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.590398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.590406 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.693006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.693053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.693064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.693082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.693092 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.795736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.795962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.796784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.796931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.797006 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.899249 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.899312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.899327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.899349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:40 crc kubenswrapper[4926]: I1125 18:13:40.899365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:40Z","lastTransitionTime":"2025-11-25T18:13:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.002634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.002703 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.002720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.002742 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.002781 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.105486 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.105570 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.105587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.105617 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.105632 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.152594 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.152771 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:14:13.152746247 +0000 UTC m=+83.538259862 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.152843 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.152901 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.152988 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.153039 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:14:13.153029844 +0000 UTC m=+83.538543459 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.153075 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.153187 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:14:13.153156068 +0000 UTC m=+83.538669723 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.208615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.208706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.208726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.208763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.208793 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.254784 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.254916 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255026 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255057 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255075 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255083 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255104 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255118 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255143 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:14:13.255122568 +0000 UTC m=+83.640636173 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.255167 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 18:14:13.255152078 +0000 UTC m=+83.640665703 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.312333 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.312393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.312404 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.312419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.312432 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.328787 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.328879 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.328787 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.328939 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.329023 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 18:13:41 crc kubenswrapper[4926]: E1125 18:13:41.329240 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.414811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.414865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.414876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.414898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.414912 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.517478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.517510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.517518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.517529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.517538 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.619850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.619899 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.619910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.619925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.619936 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.722343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.722434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.722446 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.722463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.722475 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.825037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.825078 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.825088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.825104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.825113 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.929003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.929060 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.929072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.929090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:41 crc kubenswrapper[4926]: I1125 18:13:41.929107 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:41Z","lastTransitionTime":"2025-11-25T18:13:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.032063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.032136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.032151 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.032179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.032198 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.135282 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.135353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.135399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.135460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.135479 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.238476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.238542 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.238560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.238591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.238615 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.329053 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:42 crc kubenswrapper[4926]: E1125 18:13:42.329360 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.341207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.341253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.341264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.341278 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.341320 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.444109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.444179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.444198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.444226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.444245 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.547760 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.547834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.547852 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.547884 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.547904 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.651829 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.651905 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.651923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.652016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.652037 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.755558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.755603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.755612 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.755628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.755638 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.858173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.858368 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.858443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.858503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.858560 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.962073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.962145 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.962214 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.962245 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:42 crc kubenswrapper[4926]: I1125 18:13:42.962265 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:42Z","lastTransitionTime":"2025-11-25T18:13:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.065256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.065310 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.065319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.065341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.065353 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.167830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.167869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.167881 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.167898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.167911 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.270552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.270602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.270686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.270708 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.270766 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.328573 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.328648 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.328716 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:43 crc kubenswrapper[4926]: E1125 18:13:43.328739 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:43 crc kubenswrapper[4926]: E1125 18:13:43.328839 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:43 crc kubenswrapper[4926]: E1125 18:13:43.328915 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.374798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.374862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.374873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.374891 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.374902 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.478073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.478115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.478126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.478141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.478155 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.581108 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.581183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.581205 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.581239 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.581259 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.684709 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.684778 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.684797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.684822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.684848 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.788052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.788104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.788114 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.788134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.788147 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.890691 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.890762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.890780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.890806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.890823 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.993885 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.993988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.994015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.994047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:43 crc kubenswrapper[4926]: I1125 18:13:43.994068 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:43Z","lastTransitionTime":"2025-11-25T18:13:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.098255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.098362 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.098429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.098463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.098488 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.202084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.202164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.202183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.202216 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.202234 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.306219 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.306329 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.306346 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.306409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.306428 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.329023 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:44 crc kubenswrapper[4926]: E1125 18:13:44.329226 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.411033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.411555 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.411575 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.411600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.411619 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.514830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.514881 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.514893 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.514911 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.514923 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.617844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.617896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.617908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.617928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.617944 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.720805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.720851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.720863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.720880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.720892 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.824053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.824089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.824101 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.824117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.824131 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.927662 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.927706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.927715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.927731 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:44 crc kubenswrapper[4926]: I1125 18:13:44.927741 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:44Z","lastTransitionTime":"2025-11-25T18:13:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.031318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.031387 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.031400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.031417 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.031429 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.135098 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.135144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.135157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.135176 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.135187 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.238076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.238143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.238154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.238171 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.238182 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.328266 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.328294 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.328295 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:45 crc kubenswrapper[4926]: E1125 18:13:45.328436 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:45 crc kubenswrapper[4926]: E1125 18:13:45.328483 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:45 crc kubenswrapper[4926]: E1125 18:13:45.328852 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.329100 4926 scope.go:117] "RemoveContainer" containerID="217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84" Nov 25 18:13:45 crc kubenswrapper[4926]: E1125 18:13:45.329302 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.340395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.340443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.340452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.340462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.340472 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.443103 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.443157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.443168 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.443185 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.443196 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.545945 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.546017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.546044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.546077 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.546100 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.594559 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.606079 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.611184 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.645133 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.649532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.649578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.649588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.649606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.649619 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.676569 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.694848 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.721500 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.731966 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.748148 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T1
8:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\
\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.751966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.752025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.752036 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.752061 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.752076 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.770772 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.785473 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.799997 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 
25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.815903 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.832486 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.847077 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.854412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.854448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.854459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.854473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.854484 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.858998 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.872591 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.888307 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.901177 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:45Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.957213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.957249 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.957259 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.957276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:45 crc kubenswrapper[4926]: I1125 18:13:45.957285 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:45Z","lastTransitionTime":"2025-11-25T18:13:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.060063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.060115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.060127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.060148 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.060159 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.162709 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.162742 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.162765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.162780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.162791 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.264453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.264489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.264500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.264517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.264528 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.328733 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.328908 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.366953 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.366999 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.367007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.367020 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.367028 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.469321 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.469411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.469426 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.469444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.469455 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.551283 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.551357 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.551390 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.551438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.551453 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.567574 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:46Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.570986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.571025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.571033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.571048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.571058 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.583882 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:46Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.588247 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.588298 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
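Every retry in this run fails identically: the node-status PATCH is rejected because the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a certificate that expired on 2025-08-24T17:21:41Z, months before the node's current clock. A minimal Python sketch of how such an expiry could be confirmed from the node, assuming the endpoint is reachable locally and the third-party cryptography package is installed; the function name and printed fields are illustrative, not part of any OpenShift tooling:

    import datetime
    import ssl

    from cryptography import x509  # third-party: pip install cryptography

    # Illustrative values taken from the webhook URL in the log lines above.
    WEBHOOK_HOST = "127.0.0.1"
    WEBHOOK_PORT = 9743

    def check_cert_expiry(host: str, port: int) -> None:
        # Fetch the PEM certificate without verifying it; verification would
        # abort the handshake with the same x509 error the kubelet reports.
        pem = ssl.get_server_certificate((host, port))
        cert = x509.load_pem_x509_certificate(pem.encode("ascii"))
        now = datetime.datetime.utcnow()
        print("notBefore:", cert.not_valid_before)
        print("notAfter: ", cert.not_valid_after)
        if now > cert.not_valid_after:
            print("expired: current time", now, "is after", cert.not_valid_after)

    check_cert_expiry(WEBHOOK_HOST, WEBHOOK_PORT)

Fetching the certificate without verification is the point of the sketch: with verification enabled, the handshake aborts with exactly the x509 error the kubelet keeps logging.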
event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.588312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.588331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.588344 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.603883 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:46Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.607817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.607865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.607880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.607900 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.607915 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.619957 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:46Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.624337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.624391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.624403 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.624418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.624430 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.637489 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:46Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:46 crc kubenswrapper[4926]: E1125 18:13:46.637596 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.639490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.639524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.639533 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.639549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.639560 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.741750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.741781 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.741794 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.741809 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.741819 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.844782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.844819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.844826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.844838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.844846 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.947661 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.947695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.947704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.947718 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:46 crc kubenswrapper[4926]: I1125 18:13:46.947728 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:46Z","lastTransitionTime":"2025-11-25T18:13:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.050825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.050901 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.050926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.050956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.050977 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.153873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.153918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.153933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.153951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.153967 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.258111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.258179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.258198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.258228 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.258249 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.329093 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.329207 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.329114 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:47 crc kubenswrapper[4926]: E1125 18:13:47.329418 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:47 crc kubenswrapper[4926]: E1125 18:13:47.329587 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:47 crc kubenswrapper[4926]: E1125 18:13:47.329700 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.360487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.360535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.360554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.360579 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.360614 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.463459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.463521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.463539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.463567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.463604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.565956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.565998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.566007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.566022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.566033 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.669014 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.669134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.669159 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.669190 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.669212 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.771957 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.772014 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.772037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.772068 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.772090 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.875497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.875560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.875577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.875607 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.875624 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.978022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.978063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.978075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.978091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:47 crc kubenswrapper[4926]: I1125 18:13:47.978101 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:47Z","lastTransitionTime":"2025-11-25T18:13:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.080884 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.080967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.080984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.081005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.081019 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.183056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.183117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.183130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.183148 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.183159 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.285845 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.285879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.285887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.285900 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.285909 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.328598 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:48 crc kubenswrapper[4926]: E1125 18:13:48.328735 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.388280 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.388363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.388410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.388437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.388452 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.491189 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.491269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.491295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.491322 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.491339 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.594653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.594717 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.594734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.594762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.594781 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.697454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.697512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.697531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.697557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.697577 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.800338 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.800439 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.800476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.800506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.800526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.902986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.903339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.903457 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.903587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:48 crc kubenswrapper[4926]: I1125 18:13:48.903680 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:48Z","lastTransitionTime":"2025-11-25T18:13:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.006340 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.006383 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.006391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.006405 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.006415 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.109979 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.110034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.110046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.110062 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.110071 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.211824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.211870 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.211882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.211896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.211906 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.314725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.314782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.314806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.314832 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.314844 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.328229 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.328229 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:13:49 crc kubenswrapper[4926]: E1125 18:13:49.328446 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.328262 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:13:49 crc kubenswrapper[4926]: E1125 18:13:49.328613 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 18:13:49 crc kubenswrapper[4926]: E1125 18:13:49.328628 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.417998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.418048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.418058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.418078 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.418090 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.521576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.521629 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.521638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.521654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.521666 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.624327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.624386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.624396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.624415 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.624455 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.726616 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.726686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.726707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.726739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.726761 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.829799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.829881 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.829905 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.829937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.829959 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.932104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.932140 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.932149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.932162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:49 crc kubenswrapper[4926]: I1125 18:13:49.932171 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:49Z","lastTransitionTime":"2025-11-25T18:13:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.035084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.035564 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.035721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.035965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.036175 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.140027 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.140074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.140084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.140103 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.140113 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.242515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.242557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.242565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.242581 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.242592 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.328677 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:13:50 crc kubenswrapper[4926]: E1125 18:13:50.328834 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.345880 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.345982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.346009 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.346018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.346034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.346044 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.361178 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.372944 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:
22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.384515 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.401539 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.421186 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.437058 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.447642 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.449170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.449208 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.449223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.449244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.449259 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.467222 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488
a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.481729 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.496253 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.511878 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.527086 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.540849 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.552497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.552548 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.552563 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.552583 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.552597 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.554058 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.577087 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.591649 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.625754 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e3
3e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:50Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.654615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.654665 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.654677 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 
18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.654696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.654708 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.757346 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.757434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.757454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.757477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.757492 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.860294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.860408 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.860433 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.860456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.860504 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.962522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.962576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.962592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.962609 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:50 crc kubenswrapper[4926]: I1125 18:13:50.962619 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:50Z","lastTransitionTime":"2025-11-25T18:13:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.064930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.065010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.065024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.065051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.065065 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.168286 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.168351 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.168364 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.168413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.168428 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.270528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.270608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.270631 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.270660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.270680 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.328227 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.328281 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.328249 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:51 crc kubenswrapper[4926]: E1125 18:13:51.328446 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:51 crc kubenswrapper[4926]: E1125 18:13:51.328900 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:51 crc kubenswrapper[4926]: E1125 18:13:51.328979 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.373109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.373143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.373154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.373169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.373180 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.476858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.476924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.476936 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.476962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.476975 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.579524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.579578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.579592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.579611 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.579625 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.683326 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.683392 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.683407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.683423 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.683435 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.785191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.785231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.785241 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.785258 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.785272 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.888494 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.888553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.888568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.888586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.888598 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.995642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.995725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.995815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.995859 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:51 crc kubenswrapper[4926]: I1125 18:13:51.995905 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:51Z","lastTransitionTime":"2025-11-25T18:13:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.098795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.098838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.098849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.098868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.098880 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.202343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.202500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.202523 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.202552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.202574 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.305583 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.305640 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.305650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.305668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.305681 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.329800 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:52 crc kubenswrapper[4926]: E1125 18:13:52.329975 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.408430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.408482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.408495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.408514 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.408526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.511092 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.511135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.511145 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.511158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.511170 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.613901 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.613953 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.613966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.613984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.613998 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.716303 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.716347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.716359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.716396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.716409 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.818939 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.819352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.819579 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.819749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.819899 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.922986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.923236 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.923308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.923391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:52 crc kubenswrapper[4926]: I1125 18:13:52.923520 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:52Z","lastTransitionTime":"2025-11-25T18:13:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.025696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.026016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.026152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.026247 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.026338 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.129205 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.129283 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.129302 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.129332 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.129351 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.232551 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.232621 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.232639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.232665 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.232681 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.328693 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.328759 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.328824 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:53 crc kubenswrapper[4926]: E1125 18:13:53.329791 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:53 crc kubenswrapper[4926]: E1125 18:13:53.329859 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:53 crc kubenswrapper[4926]: E1125 18:13:53.329933 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.335804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.335876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.335895 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.335947 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.335964 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.439004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.439089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.439106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.439134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.439157 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.542530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.543085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.543164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.543306 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.543423 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.646502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.646937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.647038 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.647146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.647286 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.750970 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.751131 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.751154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.751268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.751291 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.854772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.854822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.854833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.854847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.854857 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.963139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.963798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.963833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.964225 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:53 crc kubenswrapper[4926]: I1125 18:13:53.964246 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:53Z","lastTransitionTime":"2025-11-25T18:13:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.066917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.066964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.066973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.066988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.066998 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.169048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.169095 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.169107 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.169123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.169134 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.271926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.271970 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.271990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.272012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.272030 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.329441 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:54 crc kubenswrapper[4926]: E1125 18:13:54.329696 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.375219 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.375273 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.375286 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.375307 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.375320 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.478633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.478707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.478730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.478761 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.478782 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.582587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.582647 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.582659 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.582679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.582691 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.686676 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.686761 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.686790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.686832 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.686859 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.789713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.789800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.789815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.789831 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.789843 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.893193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.893274 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.893290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.893341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.893357 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.996831 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.996893 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.996909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.996959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:54 crc kubenswrapper[4926]: I1125 18:13:54.996978 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:54Z","lastTransitionTime":"2025-11-25T18:13:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.099541 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.099570 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.099578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.099591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.099599 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.202128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.202180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.202192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.202207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.202219 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.304152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.304194 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.304202 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.304216 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.304224 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.329001 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.329037 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.329046 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:55 crc kubenswrapper[4926]: E1125 18:13:55.329100 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:55 crc kubenswrapper[4926]: E1125 18:13:55.329173 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:55 crc kubenswrapper[4926]: E1125 18:13:55.329293 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.406491 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.406526 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.406538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.406578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.406590 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.508328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.508400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.508411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.508425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.508433 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.610466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.610507 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.610516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.610530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.610539 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.712431 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.713040 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.713058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.713083 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.713097 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.816138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.816179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.816189 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.816204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.816215 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.919218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.919271 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.919283 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.919305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:55 crc kubenswrapper[4926]: I1125 18:13:55.919319 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:55Z","lastTransitionTime":"2025-11-25T18:13:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.026736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.026783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.026796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.026813 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.026825 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.126573 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.126726 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.126790 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:14:28.126770863 +0000 UTC m=+98.512284468 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.128864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.128924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.128949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.128990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.129013 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.231750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.231977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.232046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.232111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.232175 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.328754 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.329075 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.333459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.333499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.333511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.333529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.333541 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.435892 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.435929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.435940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.435955 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.435964 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.538121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.538182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.538205 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.538235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.538257 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.640876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.640941 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.640966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.640994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.641016 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.713654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.713693 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.713704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.713718 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.713727 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
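Each setters.go:603 entry above prints the node's Ready condition as a JSON object. For reference, a short sketch that decodes one of those condition payloads; the local struct mirrors the JSON keys seen in the log and is a stand-in, not the upstream NodeCondition type.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for the condition object printed by setters.go:603;
// the field tags mirror the JSON keys in the log lines above.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s reason=%s: %s\n", c.Type, c.Status, c.Reason, c.Message)
}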
Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.725080 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:56Z is after 2025-08-24T17:21:41Z"
Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.729318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.729351 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
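The patch above is rejected because the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a certificate that expired on 2025-08-24, well before the node's clock time. The same verdict can be reproduced by dialing the endpoint and comparing the leaf certificate's validity window against the current time, as in the sketch below; InsecureSkipVerify is set only so the expired chain can still be inspected.

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

// Dial the webhook endpoint named in the log and report whether its leaf
// certificate is valid at the current time.
func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect the chain even though verification would fail
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("leaf valid %s .. %s\n", cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	if now.After(cert.NotAfter) || now.Before(cert.NotBefore) {
		fmt.Printf("certificate invalid at %s (matches the x509 error in the log)\n", now.Format(time.RFC3339))
	}
}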
event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.729359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.729391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.729401 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.740077 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:56Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.742990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.743091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.743155 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.743219 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.743281 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.753867 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:56Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.757136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.757164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.757172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.757186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.757195 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.767814 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:56Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.770978 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.771003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.771012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.771029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.771039 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.781679 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:56Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:56 crc kubenswrapper[4926]: E1125 18:13:56.781806 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.783271 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.783391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.783485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.783570 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.783664 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.885750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.885790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.885798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.885816 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.885827 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.988091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.988146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.988162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.988186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:56 crc kubenswrapper[4926]: I1125 18:13:56.988204 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:56Z","lastTransitionTime":"2025-11-25T18:13:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.092742 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.092788 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.092802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.092821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.092835 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.195453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.195496 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.195504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.195517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.195526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.298610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.298643 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.298654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.298669 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.298680 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.328769 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:57 crc kubenswrapper[4926]: E1125 18:13:57.328915 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.329353 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.329463 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:57 crc kubenswrapper[4926]: E1125 18:13:57.329540 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:57 crc kubenswrapper[4926]: E1125 18:13:57.329624 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.401040 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.401100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.401116 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.401135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.401148 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.503938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.503976 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.503985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.504001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.504010 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.606410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.606444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.606454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.606469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.606479 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.709249 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.709296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.709308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.709325 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.709336 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.727359 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/0.log" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.727406 4926 generic.go:334] "Generic (PLEG): container finished" podID="78af77fa-0071-48e9-8b78-bdd92abfb013" containerID="14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17" exitCode=1 Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.727433 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerDied","Data":"14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.727792 4926 scope.go:117] "RemoveContainer" containerID="14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.747572 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488
a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.758970 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.769959 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.781252 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.789658 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.798973 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.809277 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.811622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.811648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.811656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.811671 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.811679 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.822033 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.837815 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.857693 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.871833 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.888449 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.903126 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.913886 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.913919 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.913927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.913939 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.913948 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:57Z","lastTransitionTime":"2025-11-25T18:13:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.916432 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.936539 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.951325 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.965936 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:57 crc kubenswrapper[4926]: I1125 18:13:57.979831 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:57Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.016326 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.016359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.016368 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.016400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.016413 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.118748 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.118820 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.118837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.118862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.118880 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.221780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.221857 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.221878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.221904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.221928 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.325837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.325896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.325909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.325949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.325961 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.329232 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:13:58 crc kubenswrapper[4926]: E1125 18:13:58.329355 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.429008 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.429079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.429096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.429125 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.429147 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.531414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.531497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.531511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.531536 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.531553 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.636419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.636466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.636477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.636497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.636507 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.734725 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/0.log"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.734796 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerStarted","Data":"c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd"}
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.738178 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.738222 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.738233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.738251 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.738264 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.752936 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.769500 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.791534 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.812521 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.831532 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.840904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.840974 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:58 crc 
kubenswrapper[4926]: I1125 18:13:58.840988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.841015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.841034 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.852029 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.865060 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.876005 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.888360 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643c
c5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.899451 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.912107 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.923837 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.934175 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.943409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.943449 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.943459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.943474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.943485 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:58Z","lastTransitionTime":"2025-11-25T18:13:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.943428 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.961875 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.975039 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.988190 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:58 crc kubenswrapper[4926]: I1125 18:13:58.998433 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:13:58Z is after 2025-08-24T17:21:41Z" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.046020 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.046097 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.046112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.046140 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.046157 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.148989 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.149356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.149593 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.149776 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.150102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.252522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.252827 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.252976 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.253100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.253258 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.328807 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.328897 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.329199 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:13:59 crc kubenswrapper[4926]: E1125 18:13:59.329425 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:13:59 crc kubenswrapper[4926]: E1125 18:13:59.329576 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:13:59 crc kubenswrapper[4926]: E1125 18:13:59.329694 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.356587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.356847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.356948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.357046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.357125 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.459643 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.459679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.459688 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.459701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.459711 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.562445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.562487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.562499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.562516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.562527 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.664793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.664830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.664841 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.664857 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.664945 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.766923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.767443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.767520 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.767611 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.767681 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.870227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.870506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.870606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.870702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.870785 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.973633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.973849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.973962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.974052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:13:59 crc kubenswrapper[4926]: I1125 18:13:59.974143 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:13:59Z","lastTransitionTime":"2025-11-25T18:13:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.076928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.077012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.077035 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.077064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.077083 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.179764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.179811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.179820 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.179835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.179846 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.281806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.281885 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.281898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.281928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.281938 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.329228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:14:00 crc kubenswrapper[4926]: E1125 18:14:00.329339 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.330440 4926 scope.go:117] "RemoveContainer" containerID="217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.349350 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run
/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\
\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.373257 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"start
edAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839
725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.384566 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.398188 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.408867 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.420223 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.428504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.428548 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.428567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.428589 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.428604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.435828 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.448308 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.460508 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.472772 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.483568 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.494973 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.508349 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.521222 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.531005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.531058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.531070 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.531087 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.531098 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.535213 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.545279 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.569157 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.579333 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.632823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.632868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.632880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.632896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.632906 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.734694 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.734719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.734729 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.734741 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.734750 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.746528 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/2.log" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.750417 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.750881 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.761766 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 
18:14:00.772533 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.787755 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.804526 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.818397 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.838553 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.840805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.840894 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.840906 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.840942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.840953 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.851710 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.867766 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919
d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.879987 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.890341 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.903552 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.916341 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.931211 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.942106 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.943912 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.943984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.944007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.944028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.944041 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:00Z","lastTransitionTime":"2025-11-25T18:14:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.960023 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182
ceb88891595b61eb68158b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 
18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:14:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.973520 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z"
Nov 25 18:14:00 crc kubenswrapper[4926]: I1125 18:14:00.989517 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:00Z is after 2025-08-24T17:21:41Z"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.002942 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.047362 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.047469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.047485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.047510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.047528 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.150102 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.150147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.150159 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.150177 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.150188 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.252833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.252875 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.252883 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.252900 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.252914 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.329024 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.329105 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.329024 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:14:01 crc kubenswrapper[4926]: E1125 18:14:01.329201 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:01 crc kubenswrapper[4926]: E1125 18:14:01.329283 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:01 crc kubenswrapper[4926]: E1125 18:14:01.329409 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.356112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.356158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.356171 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.356192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.356206 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.459317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.459749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.459849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.459991 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.460175 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.564166 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.564221 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.564233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.564253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.564269 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.668361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.668457 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.668477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.668508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.668528 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.756316 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/3.log" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.757354 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/2.log" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.760295 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" exitCode=1 Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.760334 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.760386 4926 scope.go:117] "RemoveContainer" containerID="217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.761807 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:14:01 crc kubenswrapper[4926]: E1125 18:14:01.762113 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.770586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.770731 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.770814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.770906 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.770993 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.774549 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.791287 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.826225 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.857700 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time
2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.873595 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.873631 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.873643 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.873656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.873666 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.879945 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status:
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182
ceb88891595b61eb68158b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217b500a8dec0be8e0d5852e04c67fdebb465488a9c0e635e3d76e7c38f39e84\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:32Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI1125 18:13:32.310775 6558 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.310837 6558 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 18:13:32.311092 6558 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311322 6558 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 18:13:32.311721 6558 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 18:13:32.311820 6558 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 18:13:32.311853 6558 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 18:13:32.311912 6558 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 18:13:32.311956 6558 factory.go:656] Stopping watch factory\\\\nI1125 18:13:32.311993 6558 ovnkube.go:599] Stopped ovnkube\\\\nI1125 18:13:32.312041 6558 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 18:13:32.312078 6558 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 18:13:32.312107 6558 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 18:13:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:14:01Z\\\",\\\"message\\\":\\\"ess:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1125 18:14:01.177515 6908 lb_config.go:1031] Cluster endpoints for openshift-cluster-version/cluster-version-operator for network=default are: map[]\\\\nI1125 18:14:01.177528 6908 services_controller.go:443] Built service openshift-cluster-version/cluster-version-operator LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.182\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:9099, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1125 18:14:01.177538 6908 services_controller.go:444] Built service openshift-cluster-version/cluster-version-operator LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1125 18:14:01.176749 6908 services_controller.go:360] Finished syncing service metrics on namespace openshift-kube-controller-manager-operator for network=default : 4.640248ms\\\\nF1125 18:14:01.176942 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start 
default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:14:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.889349 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.911276 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197
cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.924821 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.939868 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.955123 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.971202 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.975673 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.975706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.975715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.975736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.975747 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:01Z","lastTransitionTime":"2025-11-25T18:14:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:01 crc kubenswrapper[4926]: I1125 18:14:01.985616 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:01Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.007994 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.026541 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.039412 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.050835 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.062258 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.072228 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.077809 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.077847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.077858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.077874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.077886 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.180785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.180835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.180849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.180865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.180876 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.283523 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.283570 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.283580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.283599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.283610 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.328715 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:14:02 crc kubenswrapper[4926]: E1125 18:14:02.328864 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.386948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.387000 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.387016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.387041 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.387057 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.490714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.490773 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.490792 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.490818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.490835 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.593238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.593276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.593285 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.593301 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.593310 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.695517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.695552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.695561 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.695577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.695585 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.765709 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/3.log" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.769504 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:14:02 crc kubenswrapper[4926]: E1125 18:14:02.769696 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.784884 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.799684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.799759 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.799814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.799847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.799869 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.806026 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.820316 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.830769 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.855156 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/o
vn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:14:01Z\\\",\\\"message\\\":\\\"ess:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1125 18:14:01.177515 6908 lb_config.go:1031] Cluster endpoints for openshift-cluster-version/cluster-version-operator for network=default are: map[]\\\\nI1125 18:14:01.177528 6908 services_controller.go:443] Built service openshift-cluster-version/cluster-version-operator LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.182\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:9099, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1125 18:14:01.177538 6908 services_controller.go:444] Built service openshift-cluster-version/cluster-version-operator LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1125 18:14:01.176749 6908 services_controller.go:360] Finished syncing service metrics on namespace openshift-kube-controller-manager-operator for network=default : 4.640248ms\\\\nF1125 18:14:01.176942 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network 
controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:14:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-over
rides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.870238 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.887427 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.902411 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.903018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.903056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:02 crc 
kubenswrapper[4926]: I1125 18:14:02.903069 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.903087 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.903100 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:02Z","lastTransitionTime":"2025-11-25T18:14:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.927767 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.936850 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.946020 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.955614 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.967171 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.980368 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:02 crc kubenswrapper[4926]: I1125 18:14:02.993069 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:02Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.003845 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:03Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.005811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.005838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.005847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.005861 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.005872 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.017081 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:03Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.027880 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:03Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:03 crc 
kubenswrapper[4926]: I1125 18:14:03.130093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.130143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.130156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.130182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.130195 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.233021 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.233061 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.233071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.233086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.233096 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.328471 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.328535 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.328471 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:03 crc kubenswrapper[4926]: E1125 18:14:03.328659 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:03 crc kubenswrapper[4926]: E1125 18:14:03.329106 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:03 crc kubenswrapper[4926]: E1125 18:14:03.329637 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.335105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.335135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.335146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.335161 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.335171 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.437924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.437960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.437970 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.437986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.437994 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.540480 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.540531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.540543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.540561 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.540574 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.643650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.643706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.643717 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.643734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.643748 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.746837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.746892 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.746906 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.746926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.746944 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.850565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.850609 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.850618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.850634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.850646 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.954137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.954295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.954765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.955045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:03 crc kubenswrapper[4926]: I1125 18:14:03.956182 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:03Z","lastTransitionTime":"2025-11-25T18:14:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.059676 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.059771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.059789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.059816 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.059830 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.163952 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.164015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.164031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.164057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.164074 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.267738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.267784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.267795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.267812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.267844 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.328737 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:04 crc kubenswrapper[4926]: E1125 18:14:04.328994 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.370365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.370465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.370485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.370508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.370522 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.473742 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.473820 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.473832 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.473851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.473863 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.577456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.577556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.577584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.577622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.577653 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.680863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.680904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.680913 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.680927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.680938 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.783675 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.783721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.783733 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.783753 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.783765 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.886754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.886797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.886810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.886830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.886842 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.989296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.989365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.989406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.989434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:04 crc kubenswrapper[4926]: I1125 18:14:04.989550 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:04Z","lastTransitionTime":"2025-11-25T18:14:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.092685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.092736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.092749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.092770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.092783 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.196700 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.196774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.196793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.196823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.196846 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.300408 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.300489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.300499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.300515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.300526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.329715 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.329836 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.329788 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:05 crc kubenswrapper[4926]: E1125 18:14:05.330008 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:05 crc kubenswrapper[4926]: E1125 18:14:05.330187 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:05 crc kubenswrapper[4926]: E1125 18:14:05.330350 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.403324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.403400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.403410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.403426 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.403435 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.505898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.505933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.505944 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.505960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.505972 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.607994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.608079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.608123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.608136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.608144 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.711208 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.711262 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.711274 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.711290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.711301 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.813490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.813548 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.813560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.813578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.813594 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.915789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.915856 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.915866 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.915888 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:05 crc kubenswrapper[4926]: I1125 18:14:05.915900 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:05Z","lastTransitionTime":"2025-11-25T18:14:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.018660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.018713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.018727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.018744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.018757 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.122734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.122882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.122910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.122950 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.122975 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.226640 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.226747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.226787 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.226836 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.226864 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.328733 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:06 crc kubenswrapper[4926]: E1125 18:14:06.328900 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.330737 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.330825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.330845 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.330928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.330960 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.433887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.433950 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.433969 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.433995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.434015 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.537677 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.537741 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.537754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.537776 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.537790 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.641598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.641667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.641686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.641715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.641732 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.745875 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.745957 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.745978 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.746010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.746030 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.850198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.850253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.850267 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.850288 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.850309 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.954097 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.954147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.954158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.954180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:06 crc kubenswrapper[4926]: I1125 18:14:06.954190 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:06Z","lastTransitionTime":"2025-11-25T18:14:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.057862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.057936 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.057956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.057985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.058004 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.062655 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.062712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.062726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.062750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.062769 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.092277 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:07Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.098045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.098082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.098094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.098108 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.098118 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.120358 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:07Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.130930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.131408 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.131455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.131494 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.131520 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.151008 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:07Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.157316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.157524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.157550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.157584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.157606 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.179778 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:07Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.187137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.187184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.187198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.187220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.187234 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.203994 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:07Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.204277 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.207090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.207171 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.207188 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.207236 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.207254 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.310217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.310257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.310269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.310288 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.310300 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.328344 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.328449 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.328544 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.328618 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.328775 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:07 crc kubenswrapper[4926]: E1125 18:14:07.328958 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.413785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.413883 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.413927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.413952 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.413965 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.518466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.518556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.518588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.518622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.518645 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.622177 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.622278 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.622305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.622339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.622365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.727540 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.727623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.727647 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.727683 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.727708 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.831576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.831638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.831649 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.831669 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.831681 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.934879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.934951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.934969 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.935053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:07 crc kubenswrapper[4926]: I1125 18:14:07.935074 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:07Z","lastTransitionTime":"2025-11-25T18:14:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.038708 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.038764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.038774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.038808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.038819 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.142337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.142451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.142507 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.142534 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.142548 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.246023 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.246078 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.246091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.246110 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.246121 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.329196 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:08 crc kubenswrapper[4926]: E1125 18:14:08.329561 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.349089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.349192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.349226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.349260 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.349281 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.452760 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.452829 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.452848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.452881 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.452903 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.555580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.555652 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.555667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.555686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.555699 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.658989 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.659046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.659063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.659093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.659116 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.762147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.762500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.762615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.762715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.762795 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.868985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.869029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.869038 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.869052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.869061 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.971360 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.971414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.971455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.971474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:08 crc kubenswrapper[4926]: I1125 18:14:08.971484 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:08Z","lastTransitionTime":"2025-11-25T18:14:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.074029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.074066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.074076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.074090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.074101 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.176482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.176534 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.176551 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.176628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.176649 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.279591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.279647 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.279660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.279680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.279695 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.328976 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.329009 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:09 crc kubenswrapper[4926]: E1125 18:14:09.329168 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.329295 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:09 crc kubenswrapper[4926]: E1125 18:14:09.329464 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:09 crc kubenswrapper[4926]: E1125 18:14:09.329565 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.382409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.382454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.382464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.382482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.382519 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.487617 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.487692 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.487717 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.487752 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.487776 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.591642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.591699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.591715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.591736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.591750 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.695466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.695535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.695549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.695568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.695579 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.798838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.798965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.799052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.799154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.799247 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.903999 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.904065 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.904084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.904114 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:09 crc kubenswrapper[4926]: I1125 18:14:09.904135 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:09Z","lastTransitionTime":"2025-11-25T18:14:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.006443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.006479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.006490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.006506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.006520 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.109877 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.109923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.109933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.109949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.109958 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.212835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.212895 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.212908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.212932 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.212949 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.315432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.315478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.315487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.315507 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.315520 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.328296 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:10 crc kubenswrapper[4926]: E1125 18:14:10.328506 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.346266 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hnjr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55918e3c-e590-4127-95dc-759990298fca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38a3ddcc93000897ef1f3f9b94a4d2ad6a85effa184ed94acb86f35eb8d17e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gnm7r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:11Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hnjr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.372347 4926 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9744c7b3-ce7a-4b4c-9ded-8ea16ed17f63\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dc86f875680781b56ec6f56fea5337ee886e3858572324f7f6a2327c5746b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://447915ef97890832fd8861883a706b3cff800d1b64e2f9d6f7f168d495ceb51c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef537aac0c8a4d87c9334f696851256430662ae03de28df67076c253344c1839\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]
},{\\\"containerID\\\":\\\"cri-o://6f57b213b5112b2ffe0e150519f8b8ad4a3e197cad79dad4030ec14d4e78f164\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e106ac47dfc281938fbdef6f8830196ecc8d6f51f5b35c74b58ce363326c8fc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b539e731515b5adeb6419cff7c61c30a61eed0c23d976e7b28239dca2d17d69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0c90b0318a0a24bff839725c8828409b0297426c39c7cccaa8726310ea1c1ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}}},{\\\"containerID\\\":\\\"cr
i-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b36d589c1ed3a00773d94aadec47fc05922d2adb88c962a6ed59405dbcedfe12\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.391441 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38faccac-c57a-42c5-9951-e3289d3aa666\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9f081c51a403e2076ddc583abf6acd5dfcc0c305008756fc09ce55a2decca1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1b8e9dc1a2d8da861f50aa84fd3959d6b7a2572291fa2b779d6a9b8405b7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://00359d2b8a7ff40d8ef3178b39324459a04ce38089b159e951c9fad5c1dc886e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5def58562a3641ecf89b3fa23b9eb5aa3c06b947066cb747d7aec8d9c3c47784\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.416433 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad3abc9282f72b4ec5f4c25bfe3b5b8d932791a4617315608a5a531d34c53b32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c3ab2e4e6da0a949bd6fdd7bc875a9fa344f77be4d629f2bfe2782e61874b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.419018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.419086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.419110 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.419143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.419171 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.440760 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c9499d2f822c8f0edf14c2d06efe254174a74de136d0516d2f11f525b8ceaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.460971 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.479707 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-49qhh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78af77fa-0071-48e9-8b78-bdd92abfb013\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:13:57Z\\\",\\\"message\\\":\\\"2025-11-25T18:13:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2\\\\n2025-11-25T18:13:12+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_a885f15f-08f8-400a-ad58-fc4a53247da2 to /host/opt/cni/bin/\\\\n2025-11-25T18:13:12Z [verbose] multus-daemon started\\\\n2025-11-25T18:13:12Z [verbose] Readiness Indicator file check\\\\n2025-11-25T18:13:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkbf2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-49qhh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.496150 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d19408d-6957-4cfa-8ac3-f286155b4c2d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://746b6535592ba10f36d3753720cfd1ae59099e66b15027d5ae62b1c058ce3480\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2cd5a4066d5de3c13a2b09922679a4953084aeabdcc06b3c1de272da999242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ef4985966d7e6424aa700dadceb6b835b8ba5ef2f00a02a568290375c41c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0adb87ccb4295e0de30e63494da51ecb5fd39dc3666ca289f7a43a6f7dd6c6dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b435bb45f2fe786e4cc498b4887ba1ee25efec45c739b5105f563f941f79a14a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7dc238d505faf5ccb0f345f842fc77e19a6c3ed7b6e392badd6c00e377827f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78abfb414f8768151db23ebbfe5e73dc28c82b0c1c8333a0f2a6d7fe175eb01d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vzkx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-r9lmm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.514210 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T18:13:04Z\\\",\\\"message\\\":\\\"W1125 18:12:53.638077 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 18:12:53.638426 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764094373 cert, and key in /tmp/serving-cert-246996891/serving-signer.crt, /tmp/serving-cert-246996891/serving-signer.key\\\\nI1125 18:12:53.874325 1 observer_polling.go:159] Starting file observer\\\\nW1125 18:12:53.880532 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 18:12:53.880897 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 18:12:53.883772 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-246996891/tls.crt::/tmp/serving-cert-246996891/tls.key\\\\\\\"\\\\nF1125 18:13:04.120647 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:53Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:12:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.522703 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.522775 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.522789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.522813 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.522828 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.529882 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.544735 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5655ebe9-673e-4e9e-ad75-edf6c92bddb7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f2819bdbc28e816c4c398787d497b3a271a109ad169549e9ed5e5ecc71ebca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tk2ml\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skdzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.559867 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d78292c0-9831-49d7-a282-63d27069e6f9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfa49c78089602e647527be31d232cdd3bbe8433fffec1f9a98f92c8f271877a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://638643cc5185f99dbbdf2727a82bdbc9cbf1e1807c17afef3004bf1fd8486024\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dptzn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-b7k6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.575076 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dsbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-2mwzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.594710 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80
ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.615080 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.626301 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.626358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.626389 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.626416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.626433 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.632693 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.645498 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.684888 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:14:01Z\\\",\\\"message\\\":\\\"ess:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1125 18:14:01.177515 6908 lb_config.go:1031] Cluster endpoints for openshift-cluster-version/cluster-version-operator for network=default are: map[]\\\\nI1125 18:14:01.177528 6908 services_controller.go:443] Built service openshift-cluster-version/cluster-version-operator LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.182\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:9099, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1125 18:14:01.177538 6908 services_controller.go:444] Built service openshift-cluster-version/cluster-version-operator LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1125 18:14:01.176749 6908 services_controller.go:360] Finished syncing service metrics on namespace openshift-kube-controller-manager-operator for network=default : 4.640248ms\\\\nF1125 18:14:01.176942 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:14:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:10Z is after 2025-08-24T17:21:41Z"
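The entry above shows the kubelet's status patch for ovnkube-node-zrwvb being rejected because the serving certificate of the pod.network-node-identity.openshift.io webhook expired on 2025-08-24. A minimal sketch, not part of this log, of how one might confirm that from the node: it dials the webhook address quoted in the error and prints the certificate's validity window. The 127.0.0.1:9743 address is taken verbatim from the error text; that the sketch runs on the node itself, with Go available there, is an assumption. Verification is skipped deliberately so an already-expired certificate can still be read rather than trusted.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Address taken verbatim from the webhook error in the log entry above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // deliberate: inspect an expired cert, don't trust it
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	state := conn.ConnectionState()
	if len(state.PeerCertificates) == 0 {
		log.Fatal("no peer certificate presented")
	}
	cert := state.PeerCertificates[0]
	fmt.Println("subject:  ", cert.Subject)
	fmt.Println("notBefore:", cert.NotBefore)
	fmt.Println("notAfter: ", cert.NotAfter) // the log's failure says now is after 2025-08-24T17:21:41Z
}

A common cause on resumed CRC snapshots is the VM coming back with certificates that expired while it was offline; they are typically rotated by the cluster shortly after startup, which matches the transient failures in this span.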
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.729664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.729714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.729725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.729743 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.729756 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.832060 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.832107 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.832120 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.832144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.832159 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.935014 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.935061 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.935073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.935091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:10 crc kubenswrapper[4926]: I1125 18:14:10.935102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:10Z","lastTransitionTime":"2025-11-25T18:14:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.039290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.039406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.039427 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.039458 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
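The Ready=False condition recorded over and over in this span has a single concrete trigger stated in its own message: no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch, not part of this log, of that check in isolation; the path is taken verbatim from the message, and running it on the node is an assumption.

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Directory the kubelet names in the NetworkReady=false message above.
	const dir = "/etc/kubernetes/cni/net.d/"
	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	if len(entries) == 0 {
		fmt.Println("no CNI configuration present; the network plugin has not written one yet")
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}

On this node the directory stays empty because ovnkube-controller, the OVN-Kubernetes component responsible for writing the CNI configuration, is itself in CrashLoopBackOff per the pod status above, so every pod sync below fails the same way.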
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.039479 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.142726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.142771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.142793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.142813 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.142825 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.248815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.248871 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.248886 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.248908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.248922 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.328327 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.328361 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.328554 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:14:11 crc kubenswrapper[4926]: E1125 18:14:11.328683 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:11 crc kubenswrapper[4926]: E1125 18:14:11.328814 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:11 crc kubenswrapper[4926]: E1125 18:14:11.328956 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.352413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.352463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.352479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.352502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.352518 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.456762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.456810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.456821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.456839 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.456853 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.560283 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.560335 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.560350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.560401 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.560418 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.663948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.664330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.664396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.664432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.664447 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.768137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.768184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.768196 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.768235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.768247 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.871319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.871445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.871457 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.871475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.871488 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.974238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.974316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.974343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.974409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:11 crc kubenswrapper[4926]: I1125 18:14:11.974430 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:11Z","lastTransitionTime":"2025-11-25T18:14:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.077973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.078067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.078086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.078116 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.078134 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.181754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.181833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.181851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.181888 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.181909 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.285719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.285808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.285828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.285862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.285889 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.328570 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:12 crc kubenswrapper[4926]: E1125 18:14:12.328806 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.389113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.389190 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.389210 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.389240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.389266 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.493250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.493309 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.493324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.493348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.493362 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.597092 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.597153 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.597166 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.597186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.597198 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.701052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.701136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.701158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.701194 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.701216 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.805358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.805454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.805474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.805501 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.805523 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.909751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.909833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.909851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.909881 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:12 crc kubenswrapper[4926]: I1125 18:14:12.909902 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:12Z","lastTransitionTime":"2025-11-25T18:14:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.013924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.013990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.014004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.014021 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.014031 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.118044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.118115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.118128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.118152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.118168 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.221487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.221558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.221577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.221608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.221629 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.251081 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.251317 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.251460 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.251418259 +0000 UTC m=+147.636931924 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
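The TearDown failure above is the kubelet reporting that no CSI driver named kubevirt.io.hostpath-provisioner has registered with it yet. A minimal sketch, not part of this log, of checking for that registration from the node: CSI drivers announce themselves to the kubelet through sockets in its plugin-registration directory. The /var/lib/kubelet/plugins_registry path is the conventional default and an assumption here; the driver name is taken verbatim from the error.

package main

import (
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	const dir = "/var/lib/kubelet/plugins_registry"    // conventional registration dir; an assumption
	const driver = "kubevirt.io.hostpath-provisioner"  // name the TearDown error reports as missing

	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	found := false
	for _, e := range entries {
		fmt.Println(e.Name()) // registration sockets are usually named <driver>-reg.sock
		if strings.Contains(e.Name(), driver) {
			found = true
		}
	}
	fmt.Println("driver registered:", found)
}

An empty or driver-less directory here is consistent with the one-minute retry the kubelet schedules below: the unmount is simply deferred until the provisioner pod comes back up and re-registers.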
Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.251519 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.251625 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.251603814 +0000 UTC m=+147.637117439 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.251627 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.251863 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.251933 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.251922533 +0000 UTC m=+147.637436358 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.324411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.324466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.324478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.324498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.324511 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.329206 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.329437 4926 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.329534 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.329705 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.329245 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.329937 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.352955 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.353052 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353249 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353280 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353302 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353353 4926 projected.go:288] Couldn't 
get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353428 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.353362038 +0000 UTC m=+147.738875653 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353445 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353474 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:14:13 crc kubenswrapper[4926]: E1125 18:14:13.353577 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.353544103 +0000 UTC m=+147.739057888 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.426895 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.427407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.427416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.427430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.427442 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.529470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.529528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.529539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.529563 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.529583 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.631955 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.631998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.632012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.632029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.632042 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.734890 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.734949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.734965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.734986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.735002 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.838468 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.838521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.838531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.838549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.838559 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.941079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.941113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.941141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.941156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:13 crc kubenswrapper[4926]: I1125 18:14:13.941164 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:13Z","lastTransitionTime":"2025-11-25T18:14:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.044844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.044892 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.044900 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.044918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.044927 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.148703 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.148739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.148750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.148764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.148771 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.252225 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.252302 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.252324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.252772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.253053 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.328602 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:14:14 crc kubenswrapper[4926]: E1125 18:14:14.329123 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.329276 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"
Nov 25 18:14:14 crc kubenswrapper[4926]: E1125 18:14:14.329500 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.356015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.356086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.356105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.356133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.356147 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.460424 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.460467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.460476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.460493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.460504 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.564470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.564565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.564596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.564628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.564647 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.667105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.667178 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.667196 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.667223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.667242 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.770538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.770603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.770621 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.770651 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.770670 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.872599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.872638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.872649 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.872663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.872672 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.975613 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.975655 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.975665 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.975681 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:14 crc kubenswrapper[4926]: I1125 18:14:14.975695 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:14Z","lastTransitionTime":"2025-11-25T18:14:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.078966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.079008 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.079017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.079034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.079044 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.182448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.182503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.182517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.182537 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.182551 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.285633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.286648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.286692 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.286726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.286743 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.328605 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.328683 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.328854 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:14:15 crc kubenswrapper[4926]: E1125 18:14:15.329053 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 18:14:15 crc kubenswrapper[4926]: E1125 18:14:15.329203 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 18:14:15 crc kubenswrapper[4926]: E1125 18:14:15.329336 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.342979 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.389562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.389604 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.389615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.389631 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.389640 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.491998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.492081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.492100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.492129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.492152 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.597893 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.598063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.598101 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.598133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.598152 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.703104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.703202 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.703229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.703264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.703292 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.807255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.807334 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.807358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.807415 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.807434 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.910467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.910527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.910543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.910568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:15 crc kubenswrapper[4926]: I1125 18:14:15.910585 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:15Z","lastTransitionTime":"2025-11-25T18:14:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.013921 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.014580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.014603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.014623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.014633 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.117653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.117756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.117784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.117821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.117843 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.221066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.221121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.221133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.221154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.221171 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.325021 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.325090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.325104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.325128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.325143 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.329357 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:14:16 crc kubenswrapper[4926]: E1125 18:14:16.329691 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.430252 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.430311 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.430330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.430418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.430441 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.534074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.534145 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.534158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.534177 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.534229 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.638592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.638678 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.638698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.638730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.638752 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.741908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.741995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.742019 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.742057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.742081 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.845833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.845877 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.845892 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.845913 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.845926 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.948606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.948691 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.948713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.948749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:16 crc kubenswrapper[4926]: I1125 18:14:16.948775 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:16Z","lastTransitionTime":"2025-11-25T18:14:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.052181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.052225 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.052235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.052249 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.052261 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.155886 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.155949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.155961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.155982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.155993 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.259349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.259448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.259464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.259493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.259512 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.265566 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.265603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.265616 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.265632 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.265643 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
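The long record that follows shows why the node never leaves NotReady at the API level either: every status patch is rejected because the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 serves a certificate that expired on 2025-08-24, months before the current time of 2025-11-25. A minimal Go check of a TLS endpoint's validity window, assuming only the address taken from that record:

// Dial a TLS server without chain verification and compare the leaf
// certificate's NotBefore/NotAfter against the current time.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // we only want to inspect the cert dates
	})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("valid %s to %s\n", cert.NotBefore, cert.NotAfter)
	if now := time.Now(); now.After(cert.NotAfter) || now.Before(cert.NotBefore) {
		fmt.Println("certificate is outside its validity window")
	}
}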
Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.289029 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:17Z is after 
2025-08-24T17:21:41Z" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.295869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.295933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.295952 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.295982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.296004 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.318044 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:17Z is after 
2025-08-24T17:21:41Z" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.324067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.324132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.324155 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.324188 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.324215 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.328416 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.328455 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.328646 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.328718 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.328911 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.329022 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.346916 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:17Z is after 
2025-08-24T17:21:41Z" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.353412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.353479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.353505 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.353538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.353565 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.377011 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:17Z is after 
2025-08-24T17:21:41Z" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.383017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.383096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.383121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.383151 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.383173 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.403734 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T18:14:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c1388c23-14d1-4724-ab12-311163f5cca5\\\",\\\"systemUUID\\\":\\\"26522ffc-f7a5-422b-aa8b-57e952227505\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:17Z is after 
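The run of "will retry" errors above, capped by the "exceeds retry count" error that follows, is the tail of a bounded retry loop: the kubelet attempts the node status update a fixed number of times (five in upstream kubelet, via its nodeStatusUpdateRetry constant, if memory serves) before giving up. A simplified Go sketch of that pattern under those assumptions; the strings mirror the log, but this is illustrative, not the kubelet's actual source:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // nodeStatusUpdateRetry mirrors the kubelet's assumed bound of 5 attempts.
    const nodeStatusUpdateRetry = 5

    // tryUpdateNodeStatus stands in for the real patch call; here it always
    // fails the way this log shows (webhook serving certificate expired).
    func tryUpdateNodeStatus(attempt int) error {
    	return errors.New("failed to patch status: x509: certificate has expired or is not yet valid")
    }

    func updateNodeStatus() error {
    	for i := 0; i < nodeStatusUpdateRetry; i++ {
    		if err := tryUpdateNodeStatus(i); err != nil {
    			fmt.Printf("Error updating node status, will retry: %v\n", err)
    			continue
    		}
    		return nil
    	}
    	return fmt.Errorf("update node status exceeds retry count")
    }

    func main() {
    	if err := updateNodeStatus(); err != nil {
    		fmt.Println("Unable to update node status:", err)
    	}
    }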
2025-08-24T17:21:41Z" Nov 25 18:14:17 crc kubenswrapper[4926]: E1125 18:14:17.403996 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.406474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.406545 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.406568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.406603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.406631 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.509716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.509793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.509814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.509843 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.509862 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.613312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.613417 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.613438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.613464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.613485 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.716504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.716603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.716624 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.716653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.716680 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.819975 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.820044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.820057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.820073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.820084 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.922940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.922997 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.923011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.923033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:17 crc kubenswrapper[4926]: I1125 18:14:17.923045 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:17Z","lastTransitionTime":"2025-11-25T18:14:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.026030 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.026089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.026106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.026132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.026150 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.130124 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.130197 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.130215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.130245 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.130264 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
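Each setters.go:603 entry embeds the node's Ready condition as inline JSON after "condition=", which makes the transitions easy to pull out of this log mechanically. A stdlib-only sketch (the sample line is abridged from the entries above; the helper name is hypothetical):

# parse_ready_condition.py: everything after "condition=" on a setters.go
# line is a plain JSON object, so json.loads handles it directly.
import json

sample = ('Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.026150 4926 '
          'setters.go:603] "Node became not ready" node="crc" '
          'condition={"type":"Ready","status":"False",'
          '"lastTransitionTime":"2025-11-25T18:14:18Z",'
          '"reason":"KubeletNotReady","message":"container runtime network not ready"}')

cond = json.loads(sample.split("condition=", 1)[1])
print(cond["type"], cond["status"], cond["reason"])  # Ready False KubeletNotReady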
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.233228 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.233298 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.233315 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.233337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.233350 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.329018 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:14:18 crc kubenswrapper[4926]: E1125 18:14:18.329207 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.335815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.335907 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.335928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.335954 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.335974 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.439103 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.439144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.439154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.439169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.439181 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.542187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.542238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.542251 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.542268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.542283 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.649134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.649187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.649199 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.649218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.649231 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.751866 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.751916 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.751931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.751954 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.751970 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.854415 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.854460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.854473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.854489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.854501 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.956695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.956734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.956743 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.956756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:18 crc kubenswrapper[4926]: I1125 18:14:18.956766 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:18Z","lastTransitionTime":"2025-11-25T18:14:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.059477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.059533 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.059543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.059556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.059566 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.162129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.162182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.162199 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.162229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.162245 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.264662 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.264709 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.264719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.264735 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.264745 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.328895 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 18:14:19 crc kubenswrapper[4926]: E1125 18:14:19.329040 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.329234 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:14:19 crc kubenswrapper[4926]: E1125 18:14:19.329294 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.329460 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 18:14:19 crc kubenswrapper[4926]: E1125 18:14:19.329508 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.368732 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.368806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.368817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.368833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.368844 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
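The three pods above cannot get sandboxes for the same reason the node is NotReady: nothing has written a CNI configuration into /etc/kubernetes/cni/net.d/ yet, because the ovnkube-controller container is still crash looping. A sketch that simply mirrors the check the message describes (the path is taken from the log; on a healthy node the directory holds the ovn-kubernetes config):

# check_cni_conf.py (hypothetical helper): report whether any CNI network
# config exists in the directory the kubelet error message points at.
import os

CNI_DIR = "/etc/kubernetes/cni/net.d"
confs = sorted(os.listdir(CNI_DIR)) if os.path.isdir(CNI_DIR) else []
print("CNI configs:", confs if confs else "none (network plugin not ready)")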
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.471876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.471940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.471955 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.471971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.471984 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.575026 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.575064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.575073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.575088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.575099 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.678176 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.678220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.678231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.678248 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.678263 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
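Further down, the ovnkube-node-zrwvb status shows the ovnkube-controller container at restartCount 3, waiting in CrashLoopBackOff with "back-off 40s". That is consistent with the kubelet's documented restart back-off, which starts at 10s and doubles per restart up to a five minute cap; a small illustrative sketch of the schedule:

# crashloop_delay.py (illustrative): the kubelet's exponential restart
# back-off doubles per crash and is capped at five minutes, so the third
# restart waits 40s, matching the ovnkube-controller entry below.
def crashloop_delay(restart_count: int, base: int = 10, cap: int = 300) -> int:
    return min(base * 2 ** max(restart_count - 1, 0), cap)

for n in range(1, 7):
    print(f"restart {n}: back-off {crashloop_delay(n)}s")  # 10, 20, 40, 80, 160, 300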
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.781444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.781524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.781547 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.781579 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.781602 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.885255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.885331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.885360 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.885434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.885468 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.988734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.988789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.988800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.988820 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:19 crc kubenswrapper[4926]: I1125 18:14:19.988833 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:19Z","lastTransitionTime":"2025-11-25T18:14:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
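The status_manager.go:875 entries below quote each rejected patch as a doubly escaped JSON string: once for the err="..." value and once for the patch embedded inside it. Two rounds of unescaping recover the patch object; a stdlib sketch (the raw sample is shortened from the kube-controller-manager-crc entry below):

# unescape_patch.py (hypothetical helper): undo the two quoting layers around
# the patch body, then pretty-print the resulting patch object.
import json

raw = r'{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"phase\\\":\\\"Running\\\"}}'
s = raw
for _ in range(2):  # each pass removes one layer of backslash escaping
    s = s.encode().decode("unicode_escape")
print(json.dumps(json.loads(s), indent=2))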
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.092544 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.092614 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.092638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.092664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.092678 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.196237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.196304 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.196325 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.196353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.196395 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.299323 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.299396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.299413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.299439 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.299455 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.329101 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:20 crc kubenswrapper[4926]: E1125 18:14:20.329343 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.360794 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d696b3-c3b8-4941-98a6-bb52494b9d1d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:12:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74be075747ddcaf24d61301b9046287cae0d27e38830c68f25e32c7f74a06f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3354a2d70e5d1705f6b1cbf2700e922602bed65e1430eb5c4854921cb3973e9\\\",\\\"image\\\":
\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6bb8031799fde54958a57cf8c66371bbb3c5323cb7b539c80ad7714351407ff\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:12:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:12:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.379251 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82e1c9483afba140494933723f10f0aaacbd333a269e7c3714c130b9f2e8d26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.398458 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.402993 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.403069 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.403084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.403119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.403135 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.412192 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w62m7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00adb94e-b8cf-4957-92d9-94e141cf6f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://330b9c081c6c105d389f333ee81dc3c438054846877c3072be4139e1292a0b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24dz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w62m7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.436944 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62905073-17d2-4b78-9921-02a343480b34\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T18:14:01Z\\\",\\\"message\\\":\\\"ess:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1125 18:14:01.177515 6908 lb_config.go:1031] Cluster endpoints for openshift-cluster-version/cluster-version-operator for network=default are: map[]\\\\nI1125 18:14:01.177528 6908 services_controller.go:443] Built service openshift-cluster-version/cluster-version-operator LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.182\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:9099, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1125 18:14:01.177538 6908 services_controller.go:444] Built service openshift-cluster-version/cluster-version-operator LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1125 18:14:01.176749 6908 services_controller.go:360] Finished syncing service metrics on namespace openshift-kube-controller-manager-operator for network=default : 4.640248ms\\\\nF1125 18:14:01.176942 6908 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T18:14:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T18:13:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T18:13:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T18:13:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xmzlv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T18:13:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zrwvb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T18:14:20Z is after 2025-08-24T17:21:41Z" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.461139 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=5.461114345 podStartE2EDuration="5.461114345s" podCreationTimestamp="2025-11-25 18:14:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.460688684 +0000 UTC m=+90.846202309" watchObservedRunningTime="2025-11-25 18:14:20.461114345 +0000 UTC m=+90.846627950" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.478712 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-hnjr5" podStartSLOduration=71.47867336 podStartE2EDuration="1m11.47867336s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.478581007 +0000 UTC m=+90.864094612" watchObservedRunningTime="2025-11-25 18:14:20.47867336 +0000 UTC m=+90.864187005" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.505514 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.505559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.505568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.505583 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.505593 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.540257 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=67.540235687 podStartE2EDuration="1m7.540235687s" podCreationTimestamp="2025-11-25 18:13:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.538247802 +0000 UTC m=+90.923761407" watchObservedRunningTime="2025-11-25 18:14:20.540235687 +0000 UTC m=+90.925749282" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.540654 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-r9lmm" podStartSLOduration=71.540648318 podStartE2EDuration="1m11.540648318s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.499488983 +0000 UTC m=+90.885002628" watchObservedRunningTime="2025-11-25 18:14:20.540648318 +0000 UTC m=+90.926161923" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.555292 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=35.555270111 podStartE2EDuration="35.555270111s" podCreationTimestamp="2025-11-25 18:13:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.554686985 +0000 UTC m=+90.940200580" watchObservedRunningTime="2025-11-25 18:14:20.555270111 +0000 UTC m=+90.940783716" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.608639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.608705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.608733 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.608757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.608770 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.615660 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-49qhh" podStartSLOduration=71.615634365 podStartE2EDuration="1m11.615634365s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.614282367 +0000 UTC m=+90.999795982" watchObservedRunningTime="2025-11-25 18:14:20.615634365 +0000 UTC m=+91.001147970" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.630982 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=71.630956477 podStartE2EDuration="1m11.630956477s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.630634058 +0000 UTC m=+91.016147693" watchObservedRunningTime="2025-11-25 18:14:20.630956477 +0000 UTC m=+91.016470082" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.687255 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podStartSLOduration=71.687226977 podStartE2EDuration="1m11.687226977s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.67063089 +0000 UTC m=+91.056144495" watchObservedRunningTime="2025-11-25 18:14:20.687226977 +0000 UTC m=+91.072740572" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.701817 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-b7k6s" podStartSLOduration=70.701786148 podStartE2EDuration="1m10.701786148s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:20.688082211 +0000 UTC m=+91.073595816" watchObservedRunningTime="2025-11-25 18:14:20.701786148 +0000 UTC m=+91.087299753" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.711898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.711946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.711957 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.711976 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.711987 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.815062 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.815132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.815146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.815170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.815186 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.919883 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.919938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.919951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.919973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:20 crc kubenswrapper[4926]: I1125 18:14:20.919986 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:20Z","lastTransitionTime":"2025-11-25T18:14:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.023388 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.023439 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.023449 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.023466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.023479 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.127425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.127538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.127563 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.127596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.127617 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.231572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.231652 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.232031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.232071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.232170 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.328304 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:21 crc kubenswrapper[4926]: E1125 18:14:21.328583 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.328833 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.328971 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:21 crc kubenswrapper[4926]: E1125 18:14:21.329587 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:21 crc kubenswrapper[4926]: E1125 18:14:21.329798 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.335506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.335565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.335588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.335618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.335640 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.439126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.439213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.439240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.439274 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.439296 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.542578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.542636 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.542656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.542682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.542696 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.645925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.645966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.645977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.645995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.646008 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.748883 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.748948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.748958 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.748978 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.748989 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.851495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.851553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.851564 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.851585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.851597 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.953908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.953946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.953954 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.953967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:21 crc kubenswrapper[4926]: I1125 18:14:21.953976 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:21Z","lastTransitionTime":"2025-11-25T18:14:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.057029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.057120 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.057144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.057179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.057201 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.160798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.160883 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.160909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.160943 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.160970 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.263843 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.263901 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.263918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.263937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.263952 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.328445 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:22 crc kubenswrapper[4926]: E1125 18:14:22.328621 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.366972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.367016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.367028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.367044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.367056 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.469691 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.469725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.469739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.469755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.469764 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.572443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.572474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.572485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.572500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.572510 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.675818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.675874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.675891 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.675911 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.675926 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.778848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.778887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.778897 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.778917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.778937 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.880971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.881007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.881016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.881030 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.881040 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.983443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.983523 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.983538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.983559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:22 crc kubenswrapper[4926]: I1125 18:14:22.983572 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:22Z","lastTransitionTime":"2025-11-25T18:14:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.086964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.087042 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.087055 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.087071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.087097 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.189997 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.190057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.190069 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.190090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.190102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.294127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.294212 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.294235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.294269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.294294 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.328649 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.328747 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:23 crc kubenswrapper[4926]: E1125 18:14:23.328945 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:23 crc kubenswrapper[4926]: E1125 18:14:23.329152 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.329650 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:23 crc kubenswrapper[4926]: E1125 18:14:23.329916 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.397669 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.397736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.397752 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.397778 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.397795 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.501460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.501508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.501519 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.501540 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.501552 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.605531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.605585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.605598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.605622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.605639 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.708863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.708935 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.708955 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.708984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.709002 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.812270 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.812348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.812363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.812411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.812425 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.916001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.916122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.916146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.916175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:23 crc kubenswrapper[4926]: I1125 18:14:23.916197 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:23Z","lastTransitionTime":"2025-11-25T18:14:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.020838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.020925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.020951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.020986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.021011 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.124847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.124921 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.124942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.124969 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.124989 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.229517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.229600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.229618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.229648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.229669 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.328890 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:24 crc kubenswrapper[4926]: E1125 18:14:24.329163 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.334209 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.334269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.334290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.334317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.334336 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.437741 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.437808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.437826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.437848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.437860 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.542613 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.542705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.542735 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.542773 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.542792 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.647236 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.647348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.648096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.648214 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.648247 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.753217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.753831 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.754129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.754474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.754776 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.857436 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.857493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.857506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.857528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.857542 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.962596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.962656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.962671 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.962694 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:24 crc kubenswrapper[4926]: I1125 18:14:24.962711 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:24Z","lastTransitionTime":"2025-11-25T18:14:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.065511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.065588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.065606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.065636 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.065657 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.168272 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.168322 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.168332 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.168354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.168366 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.271297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.271814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.271987 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.272147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.272307 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.329306 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.329322 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:25 crc kubenswrapper[4926]: E1125 18:14:25.329658 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.329367 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:25 crc kubenswrapper[4926]: E1125 18:14:25.330022 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:25 crc kubenswrapper[4926]: E1125 18:14:25.329839 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.375694 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.376036 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.376130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.376223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.376315 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.478971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.479812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.480007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.480192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.480416 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.584320 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.584721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.584818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.584930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.585069 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.689810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.689887 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.689916 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.689966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.689993 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.792365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.792428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.792437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.792450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.792459 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.895642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.895696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.895722 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.895745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.895759 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.998172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.998228 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.998242 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.998257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:25 crc kubenswrapper[4926]: I1125 18:14:25.998267 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:25Z","lastTransitionTime":"2025-11-25T18:14:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.101330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.101363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.101395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.101411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.101423 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.203627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.203679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.203697 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.203726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.203746 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.306826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.306897 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.306915 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.306937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.306953 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.332959 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:26 crc kubenswrapper[4926]: E1125 18:14:26.333186 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.409013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.409094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.409106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.409137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.409152 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.512036 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.512117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.512155 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.512189 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.512211 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.615732 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.615774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.615783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.615797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.615806 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.718317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.718356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.718388 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.718406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.718416 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.821341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.821396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.821408 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.821423 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.821433 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.923656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.923725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.923741 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.923762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:26 crc kubenswrapper[4926]: I1125 18:14:26.923777 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:26Z","lastTransitionTime":"2025-11-25T18:14:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.026552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.026941 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.027028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.027106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.027180 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.129428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.129510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.129527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.129549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.129578 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.232654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.232701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.232739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.232756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.232805 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.328723 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.328723 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:27 crc kubenswrapper[4926]: E1125 18:14:27.328990 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:27 crc kubenswrapper[4926]: E1125 18:14:27.329043 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.328739 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:27 crc kubenswrapper[4926]: E1125 18:14:27.329113 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.335962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.336019 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.336034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.336056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.336072 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.439129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.439201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.439219 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.439250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.439266 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.542461 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.542548 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.542562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.542602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.542619 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.574668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.574752 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.574774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.574804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.574823 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T18:14:27Z","lastTransitionTime":"2025-11-25T18:14:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.632093 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw"] Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.632582 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.635943 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.636096 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.636297 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.637170 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.692404 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=77.69235442 podStartE2EDuration="1m17.69235442s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:27.691670471 +0000 UTC m=+98.077184106" watchObservedRunningTime="2025-11-25 18:14:27.69235442 +0000 UTC m=+98.077868025" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.724131 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8feb6ac5-1410-4034-ba30-87d367c1cb07-service-ca\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.724593 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8feb6ac5-1410-4034-ba30-87d367c1cb07-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.724792 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8feb6ac5-1410-4034-ba30-87d367c1cb07-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.725046 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8feb6ac5-1410-4034-ba30-87d367c1cb07-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.725184 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8feb6ac5-1410-4034-ba30-87d367c1cb07-etc-ssl-certs\") pod 
\"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.737876 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-w62m7" podStartSLOduration=78.737856424 podStartE2EDuration="1m18.737856424s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:27.736921359 +0000 UTC m=+98.122434964" watchObservedRunningTime="2025-11-25 18:14:27.737856424 +0000 UTC m=+98.123370019" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.825678 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8feb6ac5-1410-4034-ba30-87d367c1cb07-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.825736 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8feb6ac5-1410-4034-ba30-87d367c1cb07-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.825761 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8feb6ac5-1410-4034-ba30-87d367c1cb07-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.825792 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8feb6ac5-1410-4034-ba30-87d367c1cb07-service-ca\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.825824 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8feb6ac5-1410-4034-ba30-87d367c1cb07-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.825973 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8feb6ac5-1410-4034-ba30-87d367c1cb07-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.826170 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/8feb6ac5-1410-4034-ba30-87d367c1cb07-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.826882 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8feb6ac5-1410-4034-ba30-87d367c1cb07-service-ca\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.834903 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8feb6ac5-1410-4034-ba30-87d367c1cb07-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.845303 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8feb6ac5-1410-4034-ba30-87d367c1cb07-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-twhsw\" (UID: \"8feb6ac5-1410-4034-ba30-87d367c1cb07\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:27 crc kubenswrapper[4926]: I1125 18:14:27.949590 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" Nov 25 18:14:28 crc kubenswrapper[4926]: I1125 18:14:28.129299 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:28 crc kubenswrapper[4926]: E1125 18:14:28.129548 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:14:28 crc kubenswrapper[4926]: E1125 18:14:28.130041 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs podName:6aa4d7ff-fb65-4a4b-b745-8bb9151862f5 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:32.130013152 +0000 UTC m=+162.515526747 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs") pod "network-metrics-daemon-2mwzk" (UID: "6aa4d7ff-fb65-4a4b-b745-8bb9151862f5") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 18:14:28 crc kubenswrapper[4926]: I1125 18:14:28.329088 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:28 crc kubenswrapper[4926]: E1125 18:14:28.329247 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:28 crc kubenswrapper[4926]: I1125 18:14:28.330858 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:14:28 crc kubenswrapper[4926]: E1125 18:14:28.331220 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zrwvb_openshift-ovn-kubernetes(62905073-17d2-4b78-9921-02a343480b34)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" Nov 25 18:14:28 crc kubenswrapper[4926]: I1125 18:14:28.865229 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" event={"ID":"8feb6ac5-1410-4034-ba30-87d367c1cb07","Type":"ContainerStarted","Data":"799e32e0ccd871f8566643a8b5e1e5a437d80b03367586a1e32edc1c981d8d73"} Nov 25 18:14:28 crc kubenswrapper[4926]: I1125 18:14:28.865294 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" event={"ID":"8feb6ac5-1410-4034-ba30-87d367c1cb07","Type":"ContainerStarted","Data":"b8c7683b39e17711a30d170f82b8dc906d125ad8564b8d319fd8ca47ecfa6d1c"} Nov 25 18:14:28 crc kubenswrapper[4926]: I1125 18:14:28.884097 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-twhsw" podStartSLOduration=79.884068515 podStartE2EDuration="1m19.884068515s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:28.881044032 +0000 UTC m=+99.266557647" watchObservedRunningTime="2025-11-25 18:14:28.884068515 +0000 UTC m=+99.269582120" Nov 25 18:14:29 crc kubenswrapper[4926]: I1125 18:14:29.329430 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:29 crc kubenswrapper[4926]: I1125 18:14:29.329520 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:29 crc kubenswrapper[4926]: E1125 18:14:29.329670 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:29 crc kubenswrapper[4926]: I1125 18:14:29.329698 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:29 crc kubenswrapper[4926]: E1125 18:14:29.329782 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:29 crc kubenswrapper[4926]: E1125 18:14:29.330458 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:30 crc kubenswrapper[4926]: I1125 18:14:30.328903 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:30 crc kubenswrapper[4926]: E1125 18:14:30.329151 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:31 crc kubenswrapper[4926]: I1125 18:14:31.328361 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:31 crc kubenswrapper[4926]: E1125 18:14:31.328514 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:31 crc kubenswrapper[4926]: I1125 18:14:31.328419 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:31 crc kubenswrapper[4926]: E1125 18:14:31.328579 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:31 crc kubenswrapper[4926]: I1125 18:14:31.328401 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:31 crc kubenswrapper[4926]: E1125 18:14:31.328953 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:32 crc kubenswrapper[4926]: I1125 18:14:32.329114 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:32 crc kubenswrapper[4926]: E1125 18:14:32.329244 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:33 crc kubenswrapper[4926]: I1125 18:14:33.328424 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:33 crc kubenswrapper[4926]: I1125 18:14:33.328461 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:33 crc kubenswrapper[4926]: E1125 18:14:33.328604 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:33 crc kubenswrapper[4926]: I1125 18:14:33.328690 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:33 crc kubenswrapper[4926]: E1125 18:14:33.328830 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:33 crc kubenswrapper[4926]: E1125 18:14:33.329312 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:34 crc kubenswrapper[4926]: I1125 18:14:34.329119 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:34 crc kubenswrapper[4926]: E1125 18:14:34.329303 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:35 crc kubenswrapper[4926]: I1125 18:14:35.328339 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:35 crc kubenswrapper[4926]: I1125 18:14:35.328440 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:35 crc kubenswrapper[4926]: I1125 18:14:35.328418 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:35 crc kubenswrapper[4926]: E1125 18:14:35.328557 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:35 crc kubenswrapper[4926]: E1125 18:14:35.329000 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:35 crc kubenswrapper[4926]: E1125 18:14:35.329220 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:36 crc kubenswrapper[4926]: I1125 18:14:36.329107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:36 crc kubenswrapper[4926]: E1125 18:14:36.329298 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:37 crc kubenswrapper[4926]: I1125 18:14:37.328805 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:37 crc kubenswrapper[4926]: I1125 18:14:37.328939 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:37 crc kubenswrapper[4926]: I1125 18:14:37.329099 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:37 crc kubenswrapper[4926]: E1125 18:14:37.328955 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:37 crc kubenswrapper[4926]: E1125 18:14:37.329197 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:37 crc kubenswrapper[4926]: E1125 18:14:37.329289 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:38 crc kubenswrapper[4926]: I1125 18:14:38.329239 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:38 crc kubenswrapper[4926]: E1125 18:14:38.329440 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:39 crc kubenswrapper[4926]: I1125 18:14:39.329066 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:39 crc kubenswrapper[4926]: I1125 18:14:39.329107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:39 crc kubenswrapper[4926]: I1125 18:14:39.329200 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:39 crc kubenswrapper[4926]: E1125 18:14:39.329904 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:39 crc kubenswrapper[4926]: E1125 18:14:39.329845 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:39 crc kubenswrapper[4926]: E1125 18:14:39.329786 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:40 crc kubenswrapper[4926]: I1125 18:14:40.328640 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:40 crc kubenswrapper[4926]: E1125 18:14:40.331189 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:41 crc kubenswrapper[4926]: I1125 18:14:41.328647 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:41 crc kubenswrapper[4926]: I1125 18:14:41.328739 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:41 crc kubenswrapper[4926]: I1125 18:14:41.328794 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:41 crc kubenswrapper[4926]: E1125 18:14:41.328941 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:41 crc kubenswrapper[4926]: E1125 18:14:41.329151 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:41 crc kubenswrapper[4926]: E1125 18:14:41.329408 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:42 crc kubenswrapper[4926]: I1125 18:14:42.329159 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:14:42 crc kubenswrapper[4926]: E1125 18:14:42.329344 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.328492 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.328597 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.328855 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:14:43 crc kubenswrapper[4926]: E1125 18:14:43.329260 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 18:14:43 crc kubenswrapper[4926]: E1125 18:14:43.329532 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 18:14:43 crc kubenswrapper[4926]: E1125 18:14:43.329582 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.329856 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.916326 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/1.log" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.916729 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/0.log" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.916765 4926 generic.go:334] "Generic (PLEG): container finished" podID="78af77fa-0071-48e9-8b78-bdd92abfb013" containerID="c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd" exitCode=1 Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.916819 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerDied","Data":"c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd"} Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.916945 4926 scope.go:117] "RemoveContainer" containerID="14678d89653a46b734d1ef02a6f8cf5c5b77ede70136d2ad623338bcb7c4ee17" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.917294 4926 scope.go:117] "RemoveContainer" containerID="c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd" Nov 25 18:14:43 crc kubenswrapper[4926]: E1125 18:14:43.917482 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-49qhh_openshift-multus(78af77fa-0071-48e9-8b78-bdd92abfb013)\"" pod="openshift-multus/multus-49qhh" podUID="78af77fa-0071-48e9-8b78-bdd92abfb013" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.920233 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/3.log" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.932620 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerStarted","Data":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"} Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.932980 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" Nov 25 18:14:43 crc kubenswrapper[4926]: I1125 18:14:43.973448 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podStartSLOduration=94.973429176 podStartE2EDuration="1m34.973429176s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:14:43.972653905 +0000 UTC m=+114.358167510" watchObservedRunningTime="2025-11-25 18:14:43.973429176 +0000 UTC m=+114.358942781" Nov 25 18:14:44 crc kubenswrapper[4926]: I1125 18:14:44.155734 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-2mwzk"] Nov 25 18:14:44 crc kubenswrapper[4926]: 
Nov 25 18:14:44 crc kubenswrapper[4926]: I1125 18:14:44.155830 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk"
Nov 25 18:14:44 crc kubenswrapper[4926]: E1125 18:14:44.155907 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-2mwzk" podUID="6aa4d7ff-fb65-4a4b-b745-8bb9151862f5"
[... the same "No sandbox for pod can be found" / "Error syncing pod, skipping" (network is not ready) entry pairs continue every 1-2 s through 18:15:02 for the same four pods; distinct entries in that window follow ...]
Nov 25 18:14:44 crc kubenswrapper[4926]: I1125 18:14:44.937040 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/1.log"
Nov 25 18:14:50 crc kubenswrapper[4926]: E1125 18:14:50.346967 4926 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Nov 25 18:14:50 crc kubenswrapper[4926]: E1125 18:14:50.562719 4926 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 18:14:55 crc kubenswrapper[4926]: E1125 18:14:55.565060 4926 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 18:14:58 crc kubenswrapper[4926]: I1125 18:14:58.329393 4926 scope.go:117] "RemoveContainer" containerID="c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd"
Nov 25 18:14:58 crc kubenswrapper[4926]: I1125 18:14:58.994277 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/1.log"
Nov 25 18:14:58 crc kubenswrapper[4926]: I1125 18:14:58.994367 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerStarted","Data":"578dba399f1b6a1dc334859cac5b006c3b3927f1a980d23e434bc3236dd17e01"}
Nov 25 18:15:01 crc kubenswrapper[4926]: I1125 18:15:01.331684 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 25 18:15:01 crc kubenswrapper[4926]: I1125 18:15:01.331697 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 25 18:15:01 crc kubenswrapper[4926]: I1125 18:15:01.331844 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 25 18:15:01 crc kubenswrapper[4926]: I1125 18:15:01.331912 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 25 18:15:01 crc kubenswrapper[4926]: I1125 18:15:01.543430 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb"
Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:15:02 crc kubenswrapper[4926]: I1125 18:15:02.332024 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 18:15:02 crc kubenswrapper[4926]: I1125 18:15:02.334885 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.915686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.971967 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tfmxn"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.973757 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.979135 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.980079 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.981774 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-l89wr"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.982300 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.990042 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.990149 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.990421 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.990589 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.990743 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991101 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991128 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991260 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991358 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991424 4926 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991550 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991777 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.991798 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.992088 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.992921 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.993731 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.994138 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.994413 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.994863 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.997921 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qkn8k"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.998366 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-597mc"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.998772 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65"] Nov 25 18:15:08 crc kubenswrapper[4926]: I1125 18:15:08.999287 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:08.999539 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:08.999582 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.000661 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-hmfpp"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.008011 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gzmf7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.008242 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.008531 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.008803 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4pxkr"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.009206 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.009445 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dxszx"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.009736 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.009761 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.010002 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.010279 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.010312 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.010285 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.012193 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.016481 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.018079 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-r4ssr"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.019251 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.020720 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-w7m5b"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.019422 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-r4ssr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.021581 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.019383 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.027901 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095506 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-image-import-ca\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-etcd-client\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095588 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/160e9743-11c4-4436-a4e0-6c757c2e35ea-serving-cert\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095623 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksh94\" (UniqueName: \"kubernetes.io/projected/b0520f02-d6e3-4491-89bd-cf765e803a79-kube-api-access-ksh94\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095650 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-74tmx\" (UniqueName: \"kubernetes.io/projected/160e9743-11c4-4436-a4e0-6c757c2e35ea-kube-api-access-74tmx\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095678 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/900fd29e-9f47-40d8-b232-fca71cd10642-audit-dir\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095704 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxvtk\" (UniqueName: \"kubernetes.io/projected/5eb74999-15a1-4b88-a6b3-d325e2331e41-kube-api-access-pxvtk\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095728 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e23f03ee-4637-49c8-b033-162178e9c4e6-serving-cert\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095755 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzgph\" (UniqueName: \"kubernetes.io/projected/afa57566-908a-4b08-846a-c2893f683b5e-kube-api-access-xzgph\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095776 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-serving-cert\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095802 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9ggl\" (UniqueName: \"kubernetes.io/projected/36fc28b4-ce41-44b8-a384-c5434107c068-kube-api-access-b9ggl\") pod \"cluster-samples-operator-665b6dd947-wrv6b\" (UID: \"36fc28b4-ce41-44b8-a384-c5434107c068\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095827 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-client\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.095931 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/160e9743-11c4-4436-a4e0-6c757c2e35ea-config\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.096206 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b0520f02-d6e3-4491-89bd-cf765e803a79-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097798 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eb74999-15a1-4b88-a6b3-d325e2331e41-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097831 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbjl6\" (UniqueName: \"kubernetes.io/projected/7c13f08b-2870-484a-a06b-e671feb57ac4-kube-api-access-tbjl6\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097856 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b0520f02-d6e3-4491-89bd-cf765e803a79-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097900 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf86c919-c2dd-4177-8572-fabf8822b35b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097923 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097949 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62j64\" (UniqueName: \"kubernetes.io/projected/900fd29e-9f47-40d8-b232-fca71cd10642-kube-api-access-62j64\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.097980 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-client-ca\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098009 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-config\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098049 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-etcd-serving-ca\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098075 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/21ab4d7d-54a0-4c82-b742-a2e65d689b18-metrics-tls\") pod \"dns-operator-744455d44c-hmfpp\" (UID: \"21ab4d7d-54a0-4c82-b742-a2e65d689b18\") " pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098100 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/36fc28b4-ce41-44b8-a384-c5434107c068-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wrv6b\" (UID: \"36fc28b4-ce41-44b8-a384-c5434107c068\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098121 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-images\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098195 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf86c919-c2dd-4177-8572-fabf8822b35b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098263 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-audit\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098299 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098328 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-service-ca\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098352 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e23f03ee-4637-49c8-b033-162178e9c4e6-available-featuregates\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098412 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-config\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098435 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/900fd29e-9f47-40d8-b232-fca71cd10642-node-pullsecrets\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098455 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-ca\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-config\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098544 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-encryption-config\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098560 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: 
\"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098585 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eb74999-15a1-4b88-a6b3-d325e2331e41-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098606 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-config\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098625 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8d2r\" (UniqueName: \"kubernetes.io/projected/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-kube-api-access-s8d2r\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098659 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/160e9743-11c4-4436-a4e0-6c757c2e35ea-trusted-ca\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098708 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2x77\" (UniqueName: \"kubernetes.io/projected/bf86c919-c2dd-4177-8572-fabf8822b35b-kube-api-access-f2x77\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098729 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-etcd-client\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098767 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-serving-cert\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-audit-policies\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098800 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098817 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-encryption-config\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098833 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/afa57566-908a-4b08-846a-c2893f683b5e-audit-dir\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w774b\" (UniqueName: \"kubernetes.io/projected/e23f03ee-4637-49c8-b033-162178e9c4e6-kube-api-access-w774b\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098913 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098971 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fphc2\" (UniqueName: \"kubernetes.io/projected/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-kube-api-access-fphc2\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.098991 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13f08b-2870-484a-a06b-e671feb57ac4-serving-cert\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099012 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ds75\" (UniqueName: \"kubernetes.io/projected/21ab4d7d-54a0-4c82-b742-a2e65d689b18-kube-api-access-2ds75\") pod \"dns-operator-744455d44c-hmfpp\" (UID: \"21ab4d7d-54a0-4c82-b742-a2e65d689b18\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099028 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f93306-167d-421d-90f4-3fd8652dffd1-serving-cert\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099047 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-config\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099063 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqgvw\" (UniqueName: \"kubernetes.io/projected/f5f93306-167d-421d-90f4-3fd8652dffd1-kube-api-access-nqgvw\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-client-ca\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099097 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-serving-cert\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.099113 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0520f02-d6e3-4491-89bd-cf765e803a79-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.120098 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.144701 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tfmxn"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.154728 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.154764 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.155678 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.169849 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.170599 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.170691 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.170868 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171112 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171186 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171281 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171296 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171314 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171301 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171505 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171519 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171534 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171603 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171633 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.170617 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171712 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171757 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171784 4926 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171637 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171820 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171853 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171839 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171893 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171902 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171935 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.171979 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172006 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172089 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172293 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172681 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172768 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172816 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172862 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.172865 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173041 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173215 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173431 4926 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173507 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173573 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173640 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173692 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173738 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173791 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173826 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173704 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.173902 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.174009 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.174102 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.174208 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.174829 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.174957 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.175316 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.175651 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.175675 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.175903 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.176044 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 
18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.176175 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.176342 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.176550 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.176758 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.176987 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.177228 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.177482 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.177623 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.177925 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178084 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178202 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178391 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178438 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178391 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178512 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.174216 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178709 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178776 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178868 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-l89wr"] Nov 
25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.178961 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.179088 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.179177 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.179496 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.181292 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.181931 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.187897 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.193971 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.195659 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.195906 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.196726 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.198663 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.198710 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.199440 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.200275 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.201190 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.201983 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215585 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxvtk\" (UniqueName: \"kubernetes.io/projected/5eb74999-15a1-4b88-a6b3-d325e2331e41-kube-api-access-pxvtk\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e23f03ee-4637-49c8-b033-162178e9c4e6-serving-cert\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzgph\" (UniqueName: \"kubernetes.io/projected/afa57566-908a-4b08-846a-c2893f683b5e-kube-api-access-xzgph\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215679 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-serving-cert\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215708 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9ggl\" (UniqueName: \"kubernetes.io/projected/36fc28b4-ce41-44b8-a384-c5434107c068-kube-api-access-b9ggl\") pod \"cluster-samples-operator-665b6dd947-wrv6b\" (UID: \"36fc28b4-ce41-44b8-a384-c5434107c068\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215738 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-client\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215765 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/160e9743-11c4-4436-a4e0-6c757c2e35ea-config\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " 
pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215790 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b0520f02-d6e3-4491-89bd-cf765e803a79-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215840 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eb74999-15a1-4b88-a6b3-d325e2331e41-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215873 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbjl6\" (UniqueName: \"kubernetes.io/projected/7c13f08b-2870-484a-a06b-e671feb57ac4-kube-api-access-tbjl6\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215892 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b0520f02-d6e3-4491-89bd-cf765e803a79-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf86c919-c2dd-4177-8572-fabf8822b35b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215953 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.215977 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62j64\" (UniqueName: \"kubernetes.io/projected/900fd29e-9f47-40d8-b232-fca71cd10642-kube-api-access-62j64\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216001 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-client-ca\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216025 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-config\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216056 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-etcd-serving-ca\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216076 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/21ab4d7d-54a0-4c82-b742-a2e65d689b18-metrics-tls\") pod \"dns-operator-744455d44c-hmfpp\" (UID: \"21ab4d7d-54a0-4c82-b742-a2e65d689b18\") " pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216098 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/36fc28b4-ce41-44b8-a384-c5434107c068-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wrv6b\" (UID: \"36fc28b4-ce41-44b8-a384-c5434107c068\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216124 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-images\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216154 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf86c919-c2dd-4177-8572-fabf8822b35b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216176 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-audit\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216199 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216222 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-service-ca\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216246 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e23f03ee-4637-49c8-b033-162178e9c4e6-available-featuregates\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216270 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-config\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216290 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/900fd29e-9f47-40d8-b232-fca71cd10642-node-pullsecrets\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-ca\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216347 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-config\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216387 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-encryption-config\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216409 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216436 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eb74999-15a1-4b88-a6b3-d325e2331e41-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-config\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216482 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8d2r\" (UniqueName: \"kubernetes.io/projected/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-kube-api-access-s8d2r\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216504 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/160e9743-11c4-4436-a4e0-6c757c2e35ea-trusted-ca\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216544 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2x77\" (UniqueName: \"kubernetes.io/projected/bf86c919-c2dd-4177-8572-fabf8822b35b-kube-api-access-f2x77\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216570 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-etcd-client\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216594 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-serving-cert\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216615 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-audit-policies\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216654 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-encryption-config\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216674 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/afa57566-908a-4b08-846a-c2893f683b5e-audit-dir\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216697 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w774b\" (UniqueName: \"kubernetes.io/projected/e23f03ee-4637-49c8-b033-162178e9c4e6-kube-api-access-w774b\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216733 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216797 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fphc2\" (UniqueName: \"kubernetes.io/projected/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-kube-api-access-fphc2\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216834 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13f08b-2870-484a-a06b-e671feb57ac4-serving-cert\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216859 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ds75\" (UniqueName: \"kubernetes.io/projected/21ab4d7d-54a0-4c82-b742-a2e65d689b18-kube-api-access-2ds75\") pod \"dns-operator-744455d44c-hmfpp\" (UID: \"21ab4d7d-54a0-4c82-b742-a2e65d689b18\") " pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216893 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f93306-167d-421d-90f4-3fd8652dffd1-serving-cert\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-config\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216957 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqgvw\" (UniqueName: \"kubernetes.io/projected/f5f93306-167d-421d-90f4-3fd8652dffd1-kube-api-access-nqgvw\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.216988 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-client-ca\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217016 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-serving-cert\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217043 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0520f02-d6e3-4491-89bd-cf765e803a79-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217071 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-image-import-ca\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217105 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-etcd-client\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217130 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/160e9743-11c4-4436-a4e0-6c757c2e35ea-serving-cert\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217155 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksh94\" (UniqueName: \"kubernetes.io/projected/b0520f02-d6e3-4491-89bd-cf765e803a79-kube-api-access-ksh94\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217178 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74tmx\" 
(UniqueName: \"kubernetes.io/projected/160e9743-11c4-4436-a4e0-6c757c2e35ea-kube-api-access-74tmx\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217199 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/900fd29e-9f47-40d8-b232-fca71cd10642-audit-dir\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.217294 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/900fd29e-9f47-40d8-b232-fca71cd10642-audit-dir\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.221804 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5eb74999-15a1-4b88-a6b3-d325e2331e41-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.222653 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.223343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-config\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.228197 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-encryption-config\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.228871 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-etcd-serving-ca\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.229760 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf86c919-c2dd-4177-8572-fabf8822b35b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.230050 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-serving-cert\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: 
\"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.231510 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-client-ca\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.232250 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.232365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-client-ca\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.232901 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-service-ca\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.236195 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.239921 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf86c919-c2dd-4177-8572-fabf8822b35b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.241230 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-config\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.243242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-etcd-client\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.244560 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f93306-167d-421d-90f4-3fd8652dffd1-serving-cert\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc 
kubenswrapper[4926]: I1125 18:15:09.265486 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/afa57566-908a-4b08-846a-c2893f683b5e-audit-dir\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266021 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/160e9743-11c4-4436-a4e0-6c757c2e35ea-serving-cert\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266320 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-image-import-ca\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266463 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266511 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13f08b-2870-484a-a06b-e671feb57ac4-serving-cert\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266480 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-images\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266651 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-config\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266667 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266938 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.266974 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/36fc28b4-ce41-44b8-a384-c5434107c068-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wrv6b\" (UID: \"36fc28b4-ce41-44b8-a384-c5434107c068\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.267487 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e23f03ee-4637-49c8-b033-162178e9c4e6-serving-cert\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.267672 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-config\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.267806 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/900fd29e-9f47-40d8-b232-fca71cd10642-node-pullsecrets\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.267825 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-audit-policies\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.268209 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-client\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.268467 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.268531 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-config\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.269062 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/900fd29e-9f47-40d8-b232-fca71cd10642-audit\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.269075 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f5f93306-167d-421d-90f4-3fd8652dffd1-etcd-ca\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.269067 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0520f02-d6e3-4491-89bd-cf765e803a79-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.269455 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e23f03ee-4637-49c8-b033-162178e9c4e6-available-featuregates\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.270392 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/160e9743-11c4-4436-a4e0-6c757c2e35ea-config\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.270445 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/afa57566-908a-4b08-846a-c2893f683b5e-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.270497 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.270979 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/21ab4d7d-54a0-4c82-b742-a2e65d689b18-metrics-tls\") pod \"dns-operator-744455d44c-hmfpp\" (UID: \"21ab4d7d-54a0-4c82-b742-a2e65d689b18\") " pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.271145 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.271335 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/160e9743-11c4-4436-a4e0-6c757c2e35ea-trusted-ca\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.272024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5eb74999-15a1-4b88-a6b3-d325e2331e41-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.274327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b0520f02-d6e3-4491-89bd-cf765e803a79-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.274881 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/900fd29e-9f47-40d8-b232-fca71cd10642-serving-cert\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.275253 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-encryption-config\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.278335 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.278748 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/afa57566-908a-4b08-846a-c2893f683b5e-etcd-client\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.280278 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-p5t89"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.281058 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.283558 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.284754 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.285188 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.286549 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.287328 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.287869 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.288511 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.289503 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qkn8k"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.291392 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.292048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-serving-cert\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.292612 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.293003 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.293709 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.294349 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.295904 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.296735 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.297188 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.298401 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.298949 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.299430 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gzmf7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.300950 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.301208 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.301911 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.302034 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.303363 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.304064 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.304203 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.305713 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.306043 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m6qg7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.306766 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.310003 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.310816 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.311642 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-tfggb"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.312933 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.312969 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.314799 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.320797 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.320835 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-597mc"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.320847 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4pxkr"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.320858 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-p8xd7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.316481 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.321255 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-hmfpp"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.321357 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.321387 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.317240 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.326579 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dxszx"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.338081 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.339677 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.339931 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.341041 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-cfm2d"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.342150 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.342642 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.343975 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.345189 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.346438 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.349092 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.349222 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.351331 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.352523 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m6qg7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.353834 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-w7m5b"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.354927 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-r4ssr"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.356232 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.357502 4926 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.357971 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.358708 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.359785 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.361134 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.362385 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.363477 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.364538 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.365787 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.366944 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-tfggb"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.368099 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-cfm2d"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.369245 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.371564 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nqxtc"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.374062 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.374245 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.374800 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-p8xd7"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.376465 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-npw6k"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.376897 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.377440 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nqxtc"] Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.377582 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.396182 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.416565 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.476891 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.496607 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.516329 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519393 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-trusted-ca\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519434 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9zbl\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-kube-api-access-k9zbl\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519461 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/881e35fe-f917-461a-a1d6-804e58b5b740-installation-pull-secrets\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519508 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-serving-cert\") pod \"console-f9d7485db-597mc\" (UID: 
\"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519532 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-trusted-ca-bundle\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519550 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-oauth-serving-cert\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519566 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvvjj\" (UniqueName: \"kubernetes.io/projected/e67bd5e5-a3c9-4576-93e6-6d7073142160-kube-api-access-cvvjj\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519591 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-oauth-config\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519649 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-bound-sa-token\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519688 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519739 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-registry-certificates\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-service-ca\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519808 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/881e35fe-f917-461a-a1d6-804e58b5b740-ca-trust-extracted\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519826 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-config\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.519873 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-registry-tls\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.520123 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.020105031 +0000 UTC m=+140.405618856 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.545654 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.558085 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.577022 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.597843 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.616359 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.620458 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.620772 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-bound-sa-token\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.620851 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5h77\" (UniqueName: \"kubernetes.io/projected/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-kube-api-access-d5h77\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621355 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68b9c21a-f80e-4ae9-8bbe-63c3af244602-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621430 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d189b2f0-78ae-4cfb-8965-eb98399c8de8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621570 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-cabundle\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621617 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621661 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-config\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621685 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-service-ca\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621708 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwplv\" (UniqueName: \"kubernetes.io/projected/455c82d6-6c13-4315-9610-a50e40fb528f-kube-api-access-rwplv\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621730 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-config\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621753 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9a3b0d5b-a479-44ec-8144-630698bb2792-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-bjxzl\" (UID: \"9a3b0d5b-a479-44ec-8144-630698bb2792\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621783 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f77438bb-ad38-4daa-ba55-108543030e57-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621803 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621823 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/881e35fe-f917-461a-a1d6-804e58b5b740-ca-trust-extracted\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621845 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621870 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d189b2f0-78ae-4cfb-8965-eb98399c8de8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621893 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6627639e-ac72-44d9-a2f5-837d5244688c-config-volume\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621920 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhjx8\" (UniqueName: \"kubernetes.io/projected/00045d3a-a833-44a7-87db-45cf8cfb26d1-kube-api-access-hhjx8\") pod \"downloads-7954f5f757-r4ssr\" (UID: \"00045d3a-a833-44a7-87db-45cf8cfb26d1\") " pod="openshift-console/downloads-7954f5f757-r4ssr"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621946 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.621986 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-config\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622020 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-plugins-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622063 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-key\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622089 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc9e969-666a-4b3b-ab6d-4b503e54c481-config\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622129 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2xcd\" (UniqueName: \"kubernetes.io/projected/98b3f18b-4b82-4b50-ac67-31ace23273f3-kube-api-access-t2xcd\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622174 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/455c82d6-6c13-4315-9610-a50e40fb528f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-certs\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622270 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6560e66-aef6-4fd2-b808-4bdfaad6b992-secret-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-default-certificate\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622335 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/98b3f18b-4b82-4b50-ac67-31ace23273f3-metrics-tls\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622412 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gb9s\" (UniqueName: \"kubernetes.io/projected/9bd152c8-7711-488d-a764-d8ffefe66454-kube-api-access-6gb9s\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622452 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fd6e266-93f5-4345-914e-b10b90cd4378-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622479 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3aa5b308-f022-46f4-8952-a2fa8815572c-tmpfs\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622506 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9zbl\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-kube-api-access-k9zbl\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622530 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622552 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622580 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622609 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-node-bootstrap-token\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622635 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622662 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-serving-cert\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622682 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-trusted-ca-bundle\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622703 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-oauth-serving-cert\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622723 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvvjj\" (UniqueName: \"kubernetes.io/projected/e67bd5e5-a3c9-4576-93e6-6d7073142160-kube-api-access-cvvjj\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622747 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfnzx\" (UniqueName: \"kubernetes.io/projected/3e21465b-c285-4623-9566-f4998c280e16-kube-api-access-rfnzx\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622790 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3aa5b308-f022-46f4-8952-a2fa8815572c-apiservice-cert\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622813 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ee4dcba5-cece-4763-93e2-3e08ab0b883b-proxy-tls\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622845 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd6e266-93f5-4345-914e-b10b90cd4378-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622865 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622887 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-oauth-config\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.622999 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn8bc\" (UniqueName: \"kubernetes.io/projected/ee4dcba5-cece-4763-93e2-3e08ab0b883b-kube-api-access-dn8bc\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.623895 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/881e35fe-f917-461a-a1d6-804e58b5b740-ca-trust-extracted\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.624435 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-oauth-serving-cert\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.625410 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-service-ca\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626471 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-config\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626481 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-machine-approver-tls\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"
Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.626545 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.126507211 +0000 UTC m=+140.512020826 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626743 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxh8f\" (UniqueName: \"kubernetes.io/projected/d5d6426f-e798-4091-8a51-7ad22cff4892-kube-api-access-vxh8f\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626786 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-stats-auth\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626811 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/aa2957da-0094-474f-809f-5fbce73202c6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ftcn\" (UniqueName: \"kubernetes.io/projected/67bbfb91-6c12-4b68-9a20-33f62381f57f-kube-api-access-9ftcn\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626894 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626921 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626950 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdljv\" (UniqueName: \"kubernetes.io/projected/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-kube-api-access-mdljv\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.626974 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e21465b-c285-4623-9566-f4998c280e16-audit-dir\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627001 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-registry-certificates\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627028 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-csi-data-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627052 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd7gl\" (UniqueName: \"kubernetes.io/projected/3aa5b308-f022-46f4-8952-a2fa8815572c-kube-api-access-qd7gl\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627053 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-serving-cert\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627101 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlm2p\" (UniqueName: \"kubernetes.io/projected/3e2712fd-5e82-4d9a-95c8-07943da9ef18-kube-api-access-dlm2p\") pod \"migrator-59844c95c7-vhlmb\" (UID: \"3e2712fd-5e82-4d9a-95c8-07943da9ef18\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627133 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkcv4\" (UniqueName: \"kubernetes.io/projected/dd6cda0e-b3b1-41d3-b01d-e14d84508259-kube-api-access-wkcv4\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627174 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-auth-proxy-config\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627197 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627249 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627279 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/68b9c21a-f80e-4ae9-8bbe-63c3af244602-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627302 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d6426f-e798-4091-8a51-7ad22cff4892-cert\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627322 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627344 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/98b3f18b-4b82-4b50-ac67-31ace23273f3-trusted-ca\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627368 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/98b3f18b-4b82-4b50-ac67-31ace23273f3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627462 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-service-ca-bundle\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627494 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f77438bb-ad38-4daa-ba55-108543030e57-proxy-tls\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627520 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.627597 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.127575521 +0000 UTC m=+140.513089126 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627666 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfj9c\" (UniqueName: \"kubernetes.io/projected/c0eb0280-e40f-4372-9b58-ee586e7cf494-kube-api-access-vfj9c\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627703 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g4vp\" (UniqueName: \"kubernetes.io/projected/6627639e-ac72-44d9-a2f5-837d5244688c-kube-api-access-9g4vp\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627731 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0dc9e969-666a-4b3b-ab6d-4b503e54c481-serving-cert\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627761 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-mountpoint-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627809 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ee4dcba5-cece-4763-93e2-3e08ab0b883b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.627981 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5bzr\" (UniqueName: \"kubernetes.io/projected/aa2957da-0094-474f-809f-5fbce73202c6-kube-api-access-k5bzr\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.628024 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5tkw\" (UniqueName: \"kubernetes.io/projected/ca675b57-be0b-4dd2-9d94-7f25262c885d-kube-api-access-x5tkw\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.628049 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/dd6cda0e-b3b1-41d3-b01d-e14d84508259-srv-cert\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.628089 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-registry-tls\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.628118 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3aa5b308-f022-46f4-8952-a2fa8815572c-webhook-cert\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.628440 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-oauth-config\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629272 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-registration-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629328 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629363 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42fr6\" (UniqueName: \"kubernetes.io/projected/8fd6e266-93f5-4345-914e-b10b90cd4378-kube-api-access-42fr6\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629409 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-trusted-ca\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629435 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68b9c21a-f80e-4ae9-8bbe-63c3af244602-config\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629453 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/dd6cda0e-b3b1-41d3-b01d-e14d84508259-profile-collector-cert\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629479 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f77438bb-ad38-4daa-ba55-108543030e57-images\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629497 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-socket-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629529 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/881e35fe-f917-461a-a1d6-804e58b5b740-installation-pull-secrets\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629547 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qpkmq\" (UID: \"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629596 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d189b2f0-78ae-4cfb-8965-eb98399c8de8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629617 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/aa2957da-0094-474f-809f-5fbce73202c6-srv-cert\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629649 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629672 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4gmq\" (UniqueName: \"kubernetes.io/projected/b6560e66-aef6-4fd2-b808-4bdfaad6b992-kube-api-access-h4gmq\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629692 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-audit-policies\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629739 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcrhd\" (UniqueName: \"kubernetes.io/projected/864adf3d-a017-4eac-944b-5aced3d2d765-kube-api-access-hcrhd\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629761 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w44k8\" (UniqueName: \"kubernetes.io/projected/8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b-kube-api-access-w44k8\") pod \"package-server-manager-789f6589d5-qpkmq\" (UID: \"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629779 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/864adf3d-a017-4eac-944b-5aced3d2d765-service-ca-bundle\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629811 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbfct\" (UniqueName: \"kubernetes.io/projected/f77438bb-ad38-4daa-ba55-108543030e57-kube-api-access-xbfct\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629829 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5b9m\" (UniqueName: \"kubernetes.io/projected/9a3b0d5b-a479-44ec-8144-630698bb2792-kube-api-access-p5b9m\") pod \"multus-admission-controller-857f4d67dd-bjxzl\" (UID: \"9a3b0d5b-a479-44ec-8144-630698bb2792\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629865 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-config\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629883 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629910 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6627639e-ac72-44d9-a2f5-837d5244688c-metrics-tls\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67bbfb91-6c12-4b68-9a20-33f62381f57f-serving-cert\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629975 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-metrics-certs\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.629992 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh47d\" (UniqueName: \"kubernetes.io/projected/0dc9e969-666a-4b3b-ab6d-4b503e54c481-kube-api-access-bh47d\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.633877 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/881e35fe-f917-461a-a1d6-804e58b5b740-installation-pull-secrets\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.634197 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-registry-tls\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.635275 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-trusted-ca-bundle\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.640915 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.643895 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-registry-certificates\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.650159 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-trusted-ca\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.672614 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxvtk\" (UniqueName: \"kubernetes.io/projected/5eb74999-15a1-4b88-a6b3-d325e2331e41-kube-api-access-pxvtk\") pod \"openshift-controller-manager-operator-756b6f6bc6-h8x65\" (UID: \"5eb74999-15a1-4b88-a6b3-d325e2331e41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.697335 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.704474 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzgph\" (UniqueName: \"kubernetes.io/projected/afa57566-908a-4b08-846a-c2893f683b5e-kube-api-access-xzgph\") pod \"apiserver-7bbb656c7d-flsl4\" (UID: \"afa57566-908a-4b08-846a-c2893f683b5e\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.719823 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbjl6\" (UniqueName: \"kubernetes.io/projected/7c13f08b-2870-484a-a06b-e671feb57ac4-kube-api-access-tbjl6\") pod \"route-controller-manager-6576b87f9c-9mnkw\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731280 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.731633 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.231595851 +0000 UTC m=+140.617109456 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731704 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w44k8\" (UniqueName: \"kubernetes.io/projected/8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b-kube-api-access-w44k8\") pod \"package-server-manager-789f6589d5-qpkmq\" (UID: \"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731751 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4gmq\" (UniqueName: \"kubernetes.io/projected/b6560e66-aef6-4fd2-b808-4bdfaad6b992-kube-api-access-h4gmq\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-audit-policies\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731794 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcrhd\" (UniqueName: \"kubernetes.io/projected/864adf3d-a017-4eac-944b-5aced3d2d765-kube-api-access-hcrhd\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731813 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/864adf3d-a017-4eac-944b-5aced3d2d765-service-ca-bundle\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731840 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbfct\" (UniqueName: \"kubernetes.io/projected/f77438bb-ad38-4daa-ba55-108543030e57-kube-api-access-xbfct\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731860 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5b9m\" (UniqueName: \"kubernetes.io/projected/9a3b0d5b-a479-44ec-8144-630698bb2792-kube-api-access-p5b9m\") pod \"multus-admission-controller-857f4d67dd-bjxzl\" (UID: \"9a3b0d5b-a479-44ec-8144-630698bb2792\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731885 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-config\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731908 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731929 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6627639e-ac72-44d9-a2f5-837d5244688c-metrics-tls\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731956 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh47d\" (UniqueName: \"kubernetes.io/projected/0dc9e969-666a-4b3b-ab6d-4b503e54c481-kube-api-access-bh47d\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731977 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67bbfb91-6c12-4b68-9a20-33f62381f57f-serving-cert\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.731996 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-metrics-certs\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732036 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5h77\" (UniqueName: \"kubernetes.io/projected/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-kube-api-access-d5h77\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-cabundle\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732094 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68b9c21a-f80e-4ae9-8bbe-63c3af244602-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732117 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d189b2f0-78ae-4cfb-8965-eb98399c8de8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732192 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-config\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732215 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwplv\" (UniqueName: \"kubernetes.io/projected/455c82d6-6c13-4315-9610-a50e40fb528f-kube-api-access-rwplv\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-config\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732250 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9a3b0d5b-a479-44ec-8144-630698bb2792-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-bjxzl\" (UID: \"9a3b0d5b-a479-44ec-8144-630698bb2792\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732270 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f77438bb-ad38-4daa-ba55-108543030e57-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732288 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732304 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732328 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732344 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d189b2f0-78ae-4cfb-8965-eb98399c8de8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732359 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6627639e-ac72-44d9-a2f5-837d5244688c-config-volume\") pod \"dns-default-cfm2d\" (UID:
\"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732400 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhjx8\" (UniqueName: \"kubernetes.io/projected/00045d3a-a833-44a7-87db-45cf8cfb26d1-kube-api-access-hhjx8\") pod \"downloads-7954f5f757-r4ssr\" (UID: \"00045d3a-a833-44a7-87db-45cf8cfb26d1\") " pod="openshift-console/downloads-7954f5f757-r4ssr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732427 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-plugins-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732446 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-key\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732467 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc9e969-666a-4b3b-ab6d-4b503e54c481-config\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732505 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2xcd\" (UniqueName: \"kubernetes.io/projected/98b3f18b-4b82-4b50-ac67-31ace23273f3-kube-api-access-t2xcd\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732523 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/455c82d6-6c13-4315-9610-a50e40fb528f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732552 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-certs\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732568 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6560e66-aef6-4fd2-b808-4bdfaad6b992-secret-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732615 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"default-certificate\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-default-certificate\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732636 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/98b3f18b-4b82-4b50-ac67-31ace23273f3-metrics-tls\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732656 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gb9s\" (UniqueName: \"kubernetes.io/projected/9bd152c8-7711-488d-a764-d8ffefe66454-kube-api-access-6gb9s\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732711 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-config\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732722 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fd6e266-93f5-4345-914e-b10b90cd4378-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732792 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3aa5b308-f022-46f4-8952-a2fa8815572c-tmpfs\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732824 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732954 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-serving-cert\") 
pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.732988 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-node-bootstrap-token\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733011 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733033 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfnzx\" (UniqueName: \"kubernetes.io/projected/3e21465b-c285-4623-9566-f4998c280e16-kube-api-access-rfnzx\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3aa5b308-f022-46f4-8952-a2fa8815572c-apiservice-cert\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733083 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ee4dcba5-cece-4763-93e2-3e08ab0b883b-proxy-tls\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733104 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd6e266-93f5-4345-914e-b10b90cd4378-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733124 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733144 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn8bc\" (UniqueName: \"kubernetes.io/projected/ee4dcba5-cece-4763-93e2-3e08ab0b883b-kube-api-access-dn8bc\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: 
\"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733171 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-machine-approver-tls\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733207 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxh8f\" (UniqueName: \"kubernetes.io/projected/d5d6426f-e798-4091-8a51-7ad22cff4892-kube-api-access-vxh8f\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733253 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-stats-auth\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733273 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/aa2957da-0094-474f-809f-5fbce73202c6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733292 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ftcn\" (UniqueName: \"kubernetes.io/projected/67bbfb91-6c12-4b68-9a20-33f62381f57f-kube-api-access-9ftcn\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733354 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733387 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733405 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdljv\" (UniqueName: \"kubernetes.io/projected/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-kube-api-access-mdljv\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 
25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733422 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e21465b-c285-4623-9566-f4998c280e16-audit-dir\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733441 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-csi-data-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733461 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd7gl\" (UniqueName: \"kubernetes.io/projected/3aa5b308-f022-46f4-8952-a2fa8815572c-kube-api-access-qd7gl\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733480 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkcv4\" (UniqueName: \"kubernetes.io/projected/dd6cda0e-b3b1-41d3-b01d-e14d84508259-kube-api-access-wkcv4\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733499 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlm2p\" (UniqueName: \"kubernetes.io/projected/3e2712fd-5e82-4d9a-95c8-07943da9ef18-kube-api-access-dlm2p\") pod \"migrator-59844c95c7-vhlmb\" (UID: \"3e2712fd-5e82-4d9a-95c8-07943da9ef18\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733522 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-auth-proxy-config\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733539 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733557 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733576 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" 
(UniqueName: \"kubernetes.io/projected/98b3f18b-4b82-4b50-ac67-31ace23273f3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733595 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/68b9c21a-f80e-4ae9-8bbe-63c3af244602-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733613 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d6426f-e798-4091-8a51-7ad22cff4892-cert\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733631 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733650 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/98b3f18b-4b82-4b50-ac67-31ace23273f3-trusted-ca\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733672 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-service-ca-bundle\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733690 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f77438bb-ad38-4daa-ba55-108543030e57-proxy-tls\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733708 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733730 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g4vp\" (UniqueName: \"kubernetes.io/projected/6627639e-ac72-44d9-a2f5-837d5244688c-kube-api-access-9g4vp\") pod \"dns-default-cfm2d\" (UID: 
\"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733747 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0dc9e969-666a-4b3b-ab6d-4b503e54c481-serving-cert\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733769 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfj9c\" (UniqueName: \"kubernetes.io/projected/c0eb0280-e40f-4372-9b58-ee586e7cf494-kube-api-access-vfj9c\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733791 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-mountpoint-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733811 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ee4dcba5-cece-4763-93e2-3e08ab0b883b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733845 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5bzr\" (UniqueName: \"kubernetes.io/projected/aa2957da-0094-474f-809f-5fbce73202c6-kube-api-access-k5bzr\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733870 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5tkw\" (UniqueName: \"kubernetes.io/projected/ca675b57-be0b-4dd2-9d94-7f25262c885d-kube-api-access-x5tkw\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733888 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/dd6cda0e-b3b1-41d3-b01d-e14d84508259-srv-cert\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733908 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3aa5b308-f022-46f4-8952-a2fa8815572c-webhook-cert\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733925 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-registration-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733927 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b0520f02-d6e3-4491-89bd-cf765e803a79-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733943 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.733984 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42fr6\" (UniqueName: \"kubernetes.io/projected/8fd6e266-93f5-4345-914e-b10b90cd4378-kube-api-access-42fr6\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734006 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68b9c21a-f80e-4ae9-8bbe-63c3af244602-config\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734028 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/dd6cda0e-b3b1-41d3-b01d-e14d84508259-profile-collector-cert\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734046 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f77438bb-ad38-4daa-ba55-108543030e57-images\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-socket-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734092 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qpkmq\" (UID: \"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734110 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/aa2957da-0094-474f-809f-5fbce73202c6-srv-cert\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734140 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d189b2f0-78ae-4cfb-8965-eb98399c8de8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734159 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734949 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-audit-policies\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.735682 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-csi-data-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.735772 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3aa5b308-f022-46f4-8952-a2fa8815572c-tmpfs\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.735954 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.736346 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-config\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.736717 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.736825 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ee4dcba5-cece-4763-93e2-3e08ab0b883b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.736880 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67bbfb91-6c12-4b68-9a20-33f62381f57f-serving-cert\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.736990 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-plugins-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.737010 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-registration-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.737025 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-auth-proxy-config\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.737289 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f77438bb-ad38-4daa-ba55-108543030e57-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.734003 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e21465b-c285-4623-9566-f4998c280e16-audit-dir\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.737996 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-config\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.738315 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67bbfb91-6c12-4b68-9a20-33f62381f57f-service-ca-bundle\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.738718 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.739294 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-socket-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.739592 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.739598 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.740096 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ca675b57-be0b-4dd2-9d94-7f25262c885d-mountpoint-dir\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.740840 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.24081646 +0000 UTC m=+140.626330135 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.741597 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.742397 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.742450 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/98b3f18b-4b82-4b50-ac67-31ace23273f3-metrics-tls\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.743316 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/98b3f18b-4b82-4b50-ac67-31ace23273f3-trusted-ca\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.743554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.746500 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-machine-approver-tls\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.752351 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62j64\" (UniqueName: \"kubernetes.io/projected/900fd29e-9f47-40d8-b232-fca71cd10642-kube-api-access-62j64\") pod \"apiserver-76f77b778f-4pxkr\" (UID: \"900fd29e-9f47-40d8-b232-fca71cd10642\") " pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.770582 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9ggl\" (UniqueName: 
\"kubernetes.io/projected/36fc28b4-ce41-44b8-a384-c5434107c068-kube-api-access-b9ggl\") pod \"cluster-samples-operator-665b6dd947-wrv6b\" (UID: \"36fc28b4-ce41-44b8-a384-c5434107c068\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.773812 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.774106 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.774155 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.774751 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.774861 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.791180 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fphc2\" (UniqueName: \"kubernetes.io/projected/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-kube-api-access-fphc2\") pod \"controller-manager-879f6c89f-gzmf7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.821851 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w774b\" (UniqueName: \"kubernetes.io/projected/e23f03ee-4637-49c8-b033-162178e9c4e6-kube-api-access-w774b\") pod \"openshift-config-operator-7777fb866f-hpnj7\" (UID: \"e23f03ee-4637-49c8-b033-162178e9c4e6\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.832129 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.833549 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74tmx\" (UniqueName: \"kubernetes.io/projected/160e9743-11c4-4436-a4e0-6c757c2e35ea-kube-api-access-74tmx\") pod \"console-operator-58897d9998-l89wr\" (UID: \"160e9743-11c4-4436-a4e0-6c757c2e35ea\") " pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.835857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.836706 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.336680383 +0000 UTC m=+140.722193988 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.858175 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8d2r\" (UniqueName: \"kubernetes.io/projected/18084ba7-0cc7-4aff-b740-277d5dfbd2c3-kube-api-access-s8d2r\") pod \"machine-api-operator-5694c8668f-tfmxn\" (UID: \"18084ba7-0cc7-4aff-b740-277d5dfbd2c3\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.877702 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2x77\" (UniqueName: \"kubernetes.io/projected/bf86c919-c2dd-4177-8572-fabf8822b35b-kube-api-access-f2x77\") pod \"openshift-apiserver-operator-796bbdcf4f-52bgr\" (UID: \"bf86c919-c2dd-4177-8572-fabf8822b35b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.891552 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqgvw\" (UniqueName: \"kubernetes.io/projected/f5f93306-167d-421d-90f4-3fd8652dffd1-kube-api-access-nqgvw\") pod \"etcd-operator-b45778765-dxszx\" (UID: \"f5f93306-167d-421d-90f4-3fd8652dffd1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.895573 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.902462 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.905205 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.912247 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ds75\" (UniqueName: \"kubernetes.io/projected/21ab4d7d-54a0-4c82-b742-a2e65d689b18-kube-api-access-2ds75\") pod \"dns-operator-744455d44c-hmfpp\" (UID: \"21ab4d7d-54a0-4c82-b742-a2e65d689b18\") " pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.912506 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.920979 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.932483 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.936555 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.936595 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.936601 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksh94\" (UniqueName: \"kubernetes.io/projected/b0520f02-d6e3-4491-89bd-cf765e803a79-kube-api-access-ksh94\") pod \"cluster-image-registry-operator-dc59b4c8b-s2nd6\" (UID: \"b0520f02-d6e3-4491-89bd-cf765e803a79\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.943515 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:09 crc kubenswrapper[4926]: E1125 18:15:09.943864 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.443849364 +0000 UTC m=+140.829362969 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.946964 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-l89wr"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.958462 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.961070 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d189b2f0-78ae-4cfb-8965-eb98399c8de8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.965715 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.976951 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.985389 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6"
Nov 25 18:15:09 crc kubenswrapper[4926]: I1125 18:15:09.997273 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.009259 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d189b2f0-78ae-4cfb-8965-eb98399c8de8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.017005 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.030310 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-metrics-certs\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.038238 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.045453 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.047106 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.547077872 +0000 UTC m=+140.932591487 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.054024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-default-certificate\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.057218 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.064810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/864adf3d-a017-4eac-944b-5aced3d2d765-stats-auth\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.076777 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.084113 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.096487 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.100121 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.106015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/864adf3d-a017-4eac-944b-5aced3d2d765-service-ca-bundle\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.116089 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.138244 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.150278 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.152481 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.153281 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.653262615 +0000 UTC m=+141.038776220 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.158674 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.181950 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.197901 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.218920 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.237030 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.256049 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.256772 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.756734199 +0000 UTC m=+141.142247804 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.257717 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.279747 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.281342 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fd6e266-93f5-4345-914e-b10b90cd4378-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.284170 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd6e266-93f5-4345-914e-b10b90cd4378-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.306825 4926 request.go:700] Waited for 1.018840332s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.309086 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.316554 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.318271 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f77438bb-ad38-4daa-ba55-108543030e57-images\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.339551 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.359544 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.361309 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.361876 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.861860071 +0000 UTC m=+141.247373686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.364271 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f77438bb-ad38-4daa-ba55-108543030e57-proxy-tls\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.378071 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.384847 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9a3b0d5b-a479-44ec-8144-630698bb2792-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-bjxzl\" (UID: \"9a3b0d5b-a479-44ec-8144-630698bb2792\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.395339 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.397502 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 25 18:15:10 crc kubenswrapper[4926]: W1125 18:15:10.403558 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode23f03ee_4637_49c8_b033_162178e9c4e6.slice/crio-2a8116df1a695a16ef3db7b4eb64525b558f1ec5922fd1f5621fd29dd4e8a8f5 WatchSource:0}: Error finding container 2a8116df1a695a16ef3db7b4eb64525b558f1ec5922fd1f5621fd29dd4e8a8f5: Status 404 returned error can't find the container with id 2a8116df1a695a16ef3db7b4eb64525b558f1ec5922fd1f5621fd29dd4e8a8f5
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.417256 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.437267 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.450125 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ee4dcba5-cece-4763-93e2-3e08ab0b883b-proxy-tls\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.457649 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.462867 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.464115 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:10.96409944 +0000 UTC m=+141.349613045 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.465892 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/aa2957da-0094-474f-809f-5fbce73202c6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.471264 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6560e66-aef6-4fd2-b808-4bdfaad6b992-secret-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.481427 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.481433 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/dd6cda0e-b3b1-41d3-b01d-e14d84508259-profile-collector-cert\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.491661 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-hmfpp"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.492296 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/dd6cda0e-b3b1-41d3-b01d-e14d84508259-srv-cert\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.496411 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gzmf7"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.499528 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.502682 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.516598 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.527770 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dxszx"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.536553 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: W1125 18:15:10.541985 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5f93306_167d_421d_90f4_3fd8652dffd1.slice/crio-fa7882731e88a9119eef2e12b2cd56f1ae65959bcc7c5947ce3d75a11625249b WatchSource:0}: Error finding container fa7882731e88a9119eef2e12b2cd56f1ae65959bcc7c5947ce3d75a11625249b: Status 404 returned error can't find the container with id fa7882731e88a9119eef2e12b2cd56f1ae65959bcc7c5947ce3d75a11625249b
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.556754 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.562547 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/68b9c21a-f80e-4ae9-8bbe-63c3af244602-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.565360 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.565756 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.065740941 +0000 UTC m=+141.451254546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.576179 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.597814 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.617480 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.628420 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68b9c21a-f80e-4ae9-8bbe-63c3af244602-config\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.637003 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.646665 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/aa2957da-0094-474f-809f-5fbce73202c6-srv-cert\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.656562 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.671286 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.673495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qpkmq\" (UID: \"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.673601 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.674832 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4pxkr"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.676214 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.677927 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.678173 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tfmxn"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.679871 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6"]
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.681875 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.176535538 +0000 UTC m=+141.562049143 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.682313 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.684399 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3aa5b308-f022-46f4-8952-a2fa8815572c-apiservice-cert\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.684563 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.18455102 +0000 UTC m=+141.570064625 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: W1125 18:15:10.693215 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf86c919_c2dd_4177_8572_fabf8822b35b.slice/crio-a9dd6917544bf7168903cde503c2160a7dd177e42ffe87c30c85f1692ecbfe6c WatchSource:0}: Error finding container a9dd6917544bf7168903cde503c2160a7dd177e42ffe87c30c85f1692ecbfe6c: Status 404 returned error can't find the container with id a9dd6917544bf7168903cde503c2160a7dd177e42ffe87c30c85f1692ecbfe6c
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.693423 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-l89wr"]
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.696484 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3aa5b308-f022-46f4-8952-a2fa8815572c-webhook-cert\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.697584 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.716788 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.733591 4926 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.733664 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6627639e-ac72-44d9-a2f5-837d5244688c-metrics-tls podName:6627639e-ac72-44d9-a2f5-837d5244688c nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.233646956 +0000 UTC m=+141.619160561 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/6627639e-ac72-44d9-a2f5-837d5244688c-metrics-tls") pod "dns-default-cfm2d" (UID: "6627639e-ac72-44d9-a2f5-837d5244688c") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.734854 4926 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.734913 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-cabundle podName:c0eb0280-e40f-4372-9b58-ee586e7cf494 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.234897782 +0000 UTC m=+141.620411397 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-cabundle") pod "service-ca-9c57cc56f-tfggb" (UID: "c0eb0280-e40f-4372-9b58-ee586e7cf494") : failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.735830 4926 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.735913 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-node-bootstrap-token podName:9bd152c8-7711-488d-a764-d8ffefe66454 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.235892881 +0000 UTC m=+141.621406486 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-node-bootstrap-token") pod "machine-config-server-npw6k" (UID: "9bd152c8-7711-488d-a764-d8ffefe66454") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736167 4926 secret.go:188] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736267 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/455c82d6-6c13-4315-9610-a50e40fb528f-control-plane-machine-set-operator-tls podName:455c82d6-6c13-4315-9610-a50e40fb528f nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.236248871 +0000 UTC m=+141.621762476 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/455c82d6-6c13-4315-9610-a50e40fb528f-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-78cbb6b69f-x5l94" (UID: "455c82d6-6c13-4315-9610-a50e40fb528f") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736306 4926 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.736340 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736352 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-key podName:c0eb0280-e40f-4372-9b58-ee586e7cf494 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.236345234 +0000 UTC m=+141.621858839 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-key") pod "service-ca-9c57cc56f-tfggb" (UID: "c0eb0280-e40f-4372-9b58-ee586e7cf494") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736753 4926 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736797 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0dc9e969-666a-4b3b-ab6d-4b503e54c481-config podName:0dc9e969-666a-4b3b-ab6d-4b503e54c481 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.236788857 +0000 UTC m=+141.622302462 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0dc9e969-666a-4b3b-ab6d-4b503e54c481-config") pod "service-ca-operator-777779d784-5z2zh" (UID: "0dc9e969-666a-4b3b-ab6d-4b503e54c481") : failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736821 4926 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736845 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-certs podName:9bd152c8-7711-488d-a764-d8ffefe66454 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.236839738 +0000 UTC m=+141.622353343 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-certs") pod "machine-config-server-npw6k" (UID: "9bd152c8-7711-488d-a764-d8ffefe66454") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736955 4926 configmap.go:193] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.736999 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca podName:1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.236986693 +0000 UTC m=+141.622500488 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca") pod "marketplace-operator-79b997595-m6qg7" (UID: "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f") : failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.737268 4926 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.737315 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume podName:b6560e66-aef6-4fd2-b808-4bdfaad6b992 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.237305772 +0000 UTC m=+141.622819377 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume") pod "collect-profiles-29401575-pzfcs" (UID: "b6560e66-aef6-4fd2-b808-4bdfaad6b992") : failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.737493 4926 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.737524 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6627639e-ac72-44d9-a2f5-837d5244688c-config-volume podName:6627639e-ac72-44d9-a2f5-837d5244688c nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.237514508 +0000 UTC m=+141.623028113 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/6627639e-ac72-44d9-a2f5-837d5244688c-config-volume") pod "dns-default-cfm2d" (UID: "6627639e-ac72-44d9-a2f5-837d5244688c") : failed to sync configmap cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.738723 4926 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.738772 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d5d6426f-e798-4091-8a51-7ad22cff4892-cert podName:d5d6426f-e798-4091-8a51-7ad22cff4892 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.238759724 +0000 UTC m=+141.624273329 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d5d6426f-e798-4091-8a51-7ad22cff4892-cert") pod "ingress-canary-p8xd7" (UID: "d5d6426f-e798-4091-8a51-7ad22cff4892") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.739277 4926 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.739315 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc9e969-666a-4b3b-ab6d-4b503e54c481-serving-cert podName:0dc9e969-666a-4b3b-ab6d-4b503e54c481 nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.23930633 +0000 UTC m=+141.624819935 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0dc9e969-666a-4b3b-ab6d-4b503e54c481-serving-cert") pod "service-ca-operator-777779d784-5z2zh" (UID: "0dc9e969-666a-4b3b-ab6d-4b503e54c481") : failed to sync secret cache: timed out waiting for the condition
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.742236 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.765746 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.777161 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.783814 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.783957 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.283934336 +0000 UTC m=+141.669447941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.784253 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.784629 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.284621485 +0000 UTC m=+141.670135080 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.797527 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.816411 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.837228 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.856938 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.876903 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.885056 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.885185 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.385162405 +0000 UTC m=+141.770676010 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.885311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.885853 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.385829204 +0000 UTC m=+141.771342809 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.895963 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.916879 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.936833 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.957869 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.976798 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.986342 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.988443 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.488424153 +0000 UTC m=+141.873937758 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.988797 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:10 crc kubenswrapper[4926]: E1125 18:15:10.989285 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.489228236 +0000 UTC m=+141.874741841 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:10 crc kubenswrapper[4926]: I1125 18:15:10.998587 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.018158 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.036945 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.057430 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.058879 4926 generic.go:334] "Generic (PLEG): container finished" podID="e23f03ee-4637-49c8-b033-162178e9c4e6" containerID="a3d04cc4d0f2f06393c58b63ef765565782cc933506b5596bf682fe9bff3da5c" exitCode=0
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.059454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" event={"ID":"e23f03ee-4637-49c8-b033-162178e9c4e6","Type":"ContainerDied","Data":"a3d04cc4d0f2f06393c58b63ef765565782cc933506b5596bf682fe9bff3da5c"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.059595 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" event={"ID":"e23f03ee-4637-49c8-b033-162178e9c4e6","Type":"ContainerStarted","Data":"2a8116df1a695a16ef3db7b4eb64525b558f1ec5922fd1f5621fd29dd4e8a8f5"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.063994 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" event={"ID":"bf86c919-c2dd-4177-8572-fabf8822b35b","Type":"ContainerStarted","Data":"1827e9edbf700c3aad5109d859f24906a6baa55b2503b72bb7b34a6abc8221f3"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.064075 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" event={"ID":"bf86c919-c2dd-4177-8572-fabf8822b35b","Type":"ContainerStarted","Data":"a9dd6917544bf7168903cde503c2160a7dd177e42ffe87c30c85f1692ecbfe6c"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.066979 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" event={"ID":"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7","Type":"ContainerStarted","Data":"f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.067049 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" event={"ID":"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7","Type":"ContainerStarted","Data":"c716366dbb80fb85768ded628f37e048bfce8e03ece8dfbb55e74df861f41f97"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.067391 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.068913 4926 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-gzmf7 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.069395 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.069951 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" event={"ID":"900fd29e-9f47-40d8-b232-fca71cd10642","Type":"ContainerStarted","Data":"9569b18f78b23261cc266c711b55ffc774fb9fbb76456fd6fac99d1d2494c156"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.071645 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-l89wr" event={"ID":"160e9743-11c4-4436-a4e0-6c757c2e35ea","Type":"ContainerStarted","Data":"a9a84e822ab4530195c9736d049970030a565a05f5d85ac83a7c7c1a84e7f2b4"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.074227 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" event={"ID":"5eb74999-15a1-4b88-a6b3-d325e2331e41","Type":"ContainerStarted","Data":"a1b3433c85cddca8beb2c57722771b33614c0815f2d4d5f21b72f4e71a3e9271"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.074255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" event={"ID":"5eb74999-15a1-4b88-a6b3-d325e2331e41","Type":"ContainerStarted","Data":"a0f28469649f9216403e4955f629af8705dd95c5e6960df7a5a732887883680e"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.075989 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.077344 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" event={"ID":"f5f93306-167d-421d-90f4-3fd8652dffd1","Type":"ContainerStarted","Data":"a004b2b41a3b93d478ace0efde6a2e6070c2fa9a50e0651114672fb704ce6337"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.077392 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" event={"ID":"f5f93306-167d-421d-90f4-3fd8652dffd1","Type":"ContainerStarted","Data":"fa7882731e88a9119eef2e12b2cd56f1ae65959bcc7c5947ce3d75a11625249b"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.082222 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" event={"ID":"18084ba7-0cc7-4aff-b740-277d5dfbd2c3","Type":"ContainerStarted","Data":"fd4d624923d0a00e5e80927d11bfd3b1540585a99f3445c66f0349cd4b5b2cc4"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.087183 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" event={"ID":"21ab4d7d-54a0-4c82-b742-a2e65d689b18","Type":"ContainerStarted","Data":"f3249d04c739b61a03914e3b3c25e07995bf04b265080a64bff302a8eedcfec4"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.087263 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" event={"ID":"21ab4d7d-54a0-4c82-b742-a2e65d689b18","Type":"ContainerStarted","Data":"e8b9cd80769f423c5430fd9062071f7ce6f2c56564a79ef9238e63e60c3f3aa8"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.091331 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.092404 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.592358581 +0000 UTC m=+141.977872326 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.092562 4926 generic.go:334] "Generic (PLEG): container finished" podID="afa57566-908a-4b08-846a-c2893f683b5e" containerID="21ee24f25051c0fe311c8221c0b24a46e0af282a4dd5db2b8c5fbfb0921bb114" exitCode=0
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.092635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" event={"ID":"afa57566-908a-4b08-846a-c2893f683b5e","Type":"ContainerDied","Data":"21ee24f25051c0fe311c8221c0b24a46e0af282a4dd5db2b8c5fbfb0921bb114"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.092663 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" event={"ID":"afa57566-908a-4b08-846a-c2893f683b5e","Type":"ContainerStarted","Data":"eeb384d1a559ed67d8cd6afcca095420f06ec0ce3deadc9d11c338cec80f2d4b"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.100152 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" event={"ID":"7c13f08b-2870-484a-a06b-e671feb57ac4","Type":"ContainerStarted","Data":"e350e93159c1f15587c9fec20503a325dd462b37dec016b697a2acedb2ceab7a"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.100927 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.102242 4926 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-9mnkw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.102293 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.102776 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.104970 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" event={"ID":"b0520f02-d6e3-4491-89bd-cf765e803a79","Type":"ContainerStarted","Data":"dc784cb50f6a741f63de1f870fe63e8ff6451388968adee74495f74212ffaf45"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.105001 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" event={"ID":"b0520f02-d6e3-4491-89bd-cf765e803a79","Type":"ContainerStarted","Data":"0da51673d82178206307641a55d09d948ea8c3bdf09e8b43fe73e04fffd24091"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.116532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" event={"ID":"36fc28b4-ce41-44b8-a384-c5434107c068","Type":"ContainerStarted","Data":"0e1da6a01125d1e5ab4b62952f984200b3d053e8cec05d3f97898cc58f2434a4"}
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.117039 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.137103 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.156717 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.178011 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.194051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.197075 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.197279 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.697260336 +0000 UTC m=+142.082773941 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.216528 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.237615 4926 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.256538 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.277039 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295052 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.295152 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.795132298 +0000 UTC m=+142.180645903 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295258 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-node-bootstrap-token\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295410 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295517 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295576 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d6426f-e798-4091-8a51-7ad22cff4892-cert\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295676 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0dc9e969-666a-4b3b-ab6d-4b503e54c481-serving-cert\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.295707 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.795696615 +0000 UTC m=+142.181210220 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6627639e-ac72-44d9-a2f5-837d5244688c-metrics-tls\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295893 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-cabundle\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295950 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6627639e-ac72-44d9-a2f5-837d5244688c-config-volume\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.295987 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.296022 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-key\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.296042 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc9e969-666a-4b3b-ab6d-4b503e54c481-config\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.296095 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/455c82d6-6c13-4315-9610-a50e40fb528f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.296130 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-certs\") pod 
\"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.297211 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.297712 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6627639e-ac72-44d9-a2f5-837d5244688c-config-volume\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.298113 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-cabundle\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.298233 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc9e969-666a-4b3b-ab6d-4b503e54c481-config\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.298868 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.300847 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.302185 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0dc9e969-666a-4b3b-ab6d-4b503e54c481-serving-cert\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.302195 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d5d6426f-e798-4091-8a51-7ad22cff4892-cert\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.303153 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c0eb0280-e40f-4372-9b58-ee586e7cf494-signing-key\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 
18:15:11.303719 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-certs\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.304489 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/455c82d6-6c13-4315-9610-a50e40fb528f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.305006 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6627639e-ac72-44d9-a2f5-837d5244688c-metrics-tls\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.315348 4926 request.go:700] Waited for 1.93732728s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dnode-bootstrapper-token&limit=500&resourceVersion=0 Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.317755 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.331043 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9bd152c8-7711-488d-a764-d8ffefe66454-node-bootstrap-token\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.373879 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-bound-sa-token\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.393242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvvjj\" (UniqueName: \"kubernetes.io/projected/e67bd5e5-a3c9-4576-93e6-6d7073142160-kube-api-access-cvvjj\") pod \"console-f9d7485db-597mc\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.398870 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.398970 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.898948912 +0000 UTC m=+142.284462517 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.399841 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.400361 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:11.900346493 +0000 UTC m=+142.285860098 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.412165 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9zbl\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-kube-api-access-k9zbl\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.441086 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4gmq\" (UniqueName: \"kubernetes.io/projected/b6560e66-aef6-4fd2-b808-4bdfaad6b992-kube-api-access-h4gmq\") pod \"collect-profiles-29401575-pzfcs\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.460196 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w44k8\" (UniqueName: \"kubernetes.io/projected/8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b-kube-api-access-w44k8\") pod \"package-server-manager-789f6589d5-qpkmq\" (UID: \"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.473882 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6f6bf9f0-4263-465e-bb8c-36e52edbaa3e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jdqpq\" (UID: \"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.483960 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.500891 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.501161 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.001132779 +0000 UTC m=+142.386646654 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.501577 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.501930 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.001917742 +0000 UTC m=+142.387431347 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.502545 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh47d\" (UniqueName: \"kubernetes.io/projected/0dc9e969-666a-4b3b-ab6d-4b503e54c481-kube-api-access-bh47d\") pod \"service-ca-operator-777779d784-5z2zh\" (UID: \"0dc9e969-666a-4b3b-ab6d-4b503e54c481\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.516976 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.528563 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5h77\" (UniqueName: \"kubernetes.io/projected/a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8-kube-api-access-d5h77\") pod \"machine-approver-56656f9798-7v5gc\" (UID: \"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.532175 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68b9c21a-f80e-4ae9-8bbe-63c3af244602-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pt8g4\" (UID: \"68b9c21a-f80e-4ae9-8bbe-63c3af244602\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.555545 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhjx8\" (UniqueName: \"kubernetes.io/projected/00045d3a-a833-44a7-87db-45cf8cfb26d1-kube-api-access-hhjx8\") pod \"downloads-7954f5f757-r4ssr\" (UID: \"00045d3a-a833-44a7-87db-45cf8cfb26d1\") " pod="openshift-console/downloads-7954f5f757-r4ssr" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.558932 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.572653 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbfct\" (UniqueName: \"kubernetes.io/projected/f77438bb-ad38-4daa-ba55-108543030e57-kube-api-access-xbfct\") pod \"machine-config-operator-74547568cd-2g56h\" (UID: \"f77438bb-ad38-4daa-ba55-108543030e57\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.579633 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.594171 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5b9m\" (UniqueName: \"kubernetes.io/projected/9a3b0d5b-a479-44ec-8144-630698bb2792-kube-api-access-p5b9m\") pod \"multus-admission-controller-857f4d67dd-bjxzl\" (UID: \"9a3b0d5b-a479-44ec-8144-630698bb2792\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.602451 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.603013 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.102996537 +0000 UTC m=+142.488510142 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.603543 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.624279 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd7gl\" (UniqueName: \"kubernetes.io/projected/3aa5b308-f022-46f4-8952-a2fa8815572c-kube-api-access-qd7gl\") pod \"packageserver-d55dfcdfc-xjm9m\" (UID: \"3aa5b308-f022-46f4-8952-a2fa8815572c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.628791 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.644593 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkcv4\" (UniqueName: \"kubernetes.io/projected/dd6cda0e-b3b1-41d3-b01d-e14d84508259-kube-api-access-wkcv4\") pod \"catalog-operator-68c6474976-gkjwp\" (UID: \"dd6cda0e-b3b1-41d3-b01d-e14d84508259\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.663769 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlm2p\" (UniqueName: \"kubernetes.io/projected/3e2712fd-5e82-4d9a-95c8-07943da9ef18-kube-api-access-dlm2p\") pod \"migrator-59844c95c7-vhlmb\" (UID: \"3e2712fd-5e82-4d9a-95c8-07943da9ef18\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.688142 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwplv\" (UniqueName: \"kubernetes.io/projected/455c82d6-6c13-4315-9610-a50e40fb528f-kube-api-access-rwplv\") pod \"control-plane-machine-set-operator-78cbb6b69f-x5l94\" (UID: \"455c82d6-6c13-4315-9610-a50e40fb528f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.697602 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfnzx\" (UniqueName: \"kubernetes.io/projected/3e21465b-c285-4623-9566-f4998c280e16-kube-api-access-rfnzx\") pod \"oauth-openshift-558db77b4-w7m5b\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") " pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.715021 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.715625 4926 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.215594726 +0000 UTC m=+142.601108331 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.721723 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2xcd\" (UniqueName: \"kubernetes.io/projected/98b3f18b-4b82-4b50-ac67-31ace23273f3-kube-api-access-t2xcd\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.734833 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq"] Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.741841 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-r4ssr" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.750418 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5bzr\" (UniqueName: \"kubernetes.io/projected/aa2957da-0094-474f-809f-5fbce73202c6-kube-api-access-k5bzr\") pod \"olm-operator-6b444d44fb-ftsfp\" (UID: \"aa2957da-0094-474f-809f-5fbce73202c6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.758155 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5tkw\" (UniqueName: \"kubernetes.io/projected/ca675b57-be0b-4dd2-9d94-7f25262c885d-kube-api-access-x5tkw\") pod \"csi-hostpathplugin-nqxtc\" (UID: \"ca675b57-be0b-4dd2-9d94-7f25262c885d\") " pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.758937 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.769357 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.777631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gb9s\" (UniqueName: \"kubernetes.io/projected/9bd152c8-7711-488d-a764-d8ffefe66454-kube-api-access-6gb9s\") pod \"machine-config-server-npw6k\" (UID: \"9bd152c8-7711-488d-a764-d8ffefe66454\") " pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.797994 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42fr6\" (UniqueName: \"kubernetes.io/projected/8fd6e266-93f5-4345-914e-b10b90cd4378-kube-api-access-42fr6\") pod \"kube-storage-version-migrator-operator-b67b599dd-xx679\" (UID: \"8fd6e266-93f5-4345-914e-b10b90cd4378\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.813590 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.815109 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.815866 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.837039 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.337002501 +0000 UTC m=+142.722516106 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.837027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d189b2f0-78ae-4cfb-8965-eb98399c8de8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-bglkt\" (UID: \"d189b2f0-78ae-4cfb-8965-eb98399c8de8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.837043 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.837216 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.844755 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-597mc"] Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.853128 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.853665 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/98b3f18b-4b82-4b50-ac67-31ace23273f3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zb4gn\" (UID: \"98b3f18b-4b82-4b50-ac67-31ace23273f3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.854272 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4"] Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.860766 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g4vp\" (UniqueName: \"kubernetes.io/projected/6627639e-ac72-44d9-a2f5-837d5244688c-kube-api-access-9g4vp\") pod \"dns-default-cfm2d\" (UID: \"6627639e-ac72-44d9-a2f5-837d5244688c\") " pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.869107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.888209 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn8bc\" (UniqueName: \"kubernetes.io/projected/ee4dcba5-cece-4763-93e2-3e08ab0b883b-kube-api-access-dn8bc\") pod \"machine-config-controller-84d6567774-sc7cs\" (UID: \"ee4dcba5-cece-4763-93e2-3e08ab0b883b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.891476 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.898588 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfj9c\" (UniqueName: \"kubernetes.io/projected/c0eb0280-e40f-4372-9b58-ee586e7cf494-kube-api-access-vfj9c\") pod \"service-ca-9c57cc56f-tfggb\" (UID: \"c0eb0280-e40f-4372-9b58-ee586e7cf494\") " pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:11 crc kubenswrapper[4926]: W1125 18:15:11.904407 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68b9c21a_f80e_4ae9_8bbe_63c3af244602.slice/crio-8184cd54f9c29ae127069d12d0afe1b210f38e528b4c92c89d6ba208a9c0823a WatchSource:0}: Error finding container 8184cd54f9c29ae127069d12d0afe1b210f38e528b4c92c89d6ba208a9c0823a: Status 404 returned error can't find the container with id 8184cd54f9c29ae127069d12d0afe1b210f38e528b4c92c89d6ba208a9c0823a Nov 25 18:15:11 crc kubenswrapper[4926]: W1125 18:15:11.906305 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode67bd5e5_a3c9_4576_93e6_6d7073142160.slice/crio-7e8c00e1d22bcb05f3bb0c40bfcf4c745ce7dbcf2a8fe9c5a632de959accb9d0 WatchSource:0}: Error finding container 7e8c00e1d22bcb05f3bb0c40bfcf4c745ce7dbcf2a8fe9c5a632de959accb9d0: Status 404 returned error can't find the container with id 7e8c00e1d22bcb05f3bb0c40bfcf4c745ce7dbcf2a8fe9c5a632de959accb9d0 Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.911683 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.913593 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"] Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.922661 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:11 crc kubenswrapper[4926]: E1125 18:15:11.923125 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.423111852 +0000 UTC m=+142.808625457 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.925366 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.927394 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdljv\" (UniqueName: \"kubernetes.io/projected/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-kube-api-access-mdljv\") pod \"marketplace-operator-79b997595-m6qg7\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") " pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.945802 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxh8f\" (UniqueName: \"kubernetes.io/projected/d5d6426f-e798-4091-8a51-7ad22cff4892-kube-api-access-vxh8f\") pod \"ingress-canary-p8xd7\" (UID: \"d5d6426f-e798-4091-8a51-7ad22cff4892\") " pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.946174 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.971689 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.979779 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-npw6k" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.985015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ftcn\" (UniqueName: \"kubernetes.io/projected/67bbfb91-6c12-4b68-9a20-33f62381f57f-kube-api-access-9ftcn\") pod \"authentication-operator-69f744f599-dzlmz\" (UID: \"67bbfb91-6c12-4b68-9a20-33f62381f57f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.994681 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"] Nov 25 18:15:11 crc kubenswrapper[4926]: I1125 18:15:11.997004 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcrhd\" (UniqueName: \"kubernetes.io/projected/864adf3d-a017-4eac-944b-5aced3d2d765-kube-api-access-hcrhd\") pod \"router-default-5444994796-p5t89\" (UID: \"864adf3d-a017-4eac-944b-5aced3d2d765\") " pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.006971 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.028101 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.028315 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.528285875 +0000 UTC m=+142.913799480 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.029150 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.029558 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.529546272 +0000 UTC m=+142.915059877 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: W1125 18:15:12.059830 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3fbc7b8_0f81_44b9_8890_5f6c124f2fa8.slice/crio-4e9bf9b194ea32bac2532a3b6fff2eeddd378635dd166d1babd1d569cab33040 WatchSource:0}: Error finding container 4e9bf9b194ea32bac2532a3b6fff2eeddd378635dd166d1babd1d569cab33040: Status 404 returned error can't find the container with id 4e9bf9b194ea32bac2532a3b6fff2eeddd378635dd166d1babd1d569cab33040 Nov 25 18:15:12 crc kubenswrapper[4926]: W1125 18:15:12.064036 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6560e66_aef6_4fd2_b808_4bdfaad6b992.slice/crio-6a6ab94847dc7c653e0540ee93c87294a9e41093d690686f420ba66e5f7139f7 WatchSource:0}: Error finding container 6a6ab94847dc7c653e0540ee93c87294a9e41093d690686f420ba66e5f7139f7: Status 404 returned error can't find the container with id 6a6ab94847dc7c653e0540ee93c87294a9e41093d690686f420ba66e5f7139f7 Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.064105 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.076718 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" Nov 25 18:15:12 crc kubenswrapper[4926]: W1125 18:15:12.080448 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0dc9e969_666a_4b3b_ab6d_4b503e54c481.slice/crio-88049c34965615ddbc1b1425495aef30cfc67cb0b22fae8e72a48ed7548c2658 WatchSource:0}: Error finding container 88049c34965615ddbc1b1425495aef30cfc67cb0b22fae8e72a48ed7548c2658: Status 404 returned error can't find the container with id 88049c34965615ddbc1b1425495aef30cfc67cb0b22fae8e72a48ed7548c2658 Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.096931 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.099150 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.122967 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-w7m5b"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.129831 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.130236 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.630221954 +0000 UTC m=+143.015735559 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.134677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" event={"ID":"18084ba7-0cc7-4aff-b740-277d5dfbd2c3","Type":"ContainerStarted","Data":"ffaf816de46053853d584cf98ce1bda33009a67d6ac46a40afbb36d2be0c6899"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.134717 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" event={"ID":"18084ba7-0cc7-4aff-b740-277d5dfbd2c3","Type":"ContainerStarted","Data":"2389d6958b5f21f3ca619db1ac4d0bd600fcd9d2839f789261c667897b8bbe12"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.141422 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.143983 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" event={"ID":"afa57566-908a-4b08-846a-c2893f683b5e","Type":"ContainerStarted","Data":"4c5ce8ed3043d1c7b8b29cc060843df4f4ede7646868647b2ce42e3d67d5b2ba"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.154793 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" event={"ID":"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b","Type":"ContainerStarted","Data":"c82c8633e62e2d9fbc45d58e77027143c57759d9396d0736626ee5b34dd6c3de"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.158726 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-r4ssr"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.161531 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" event={"ID":"e23f03ee-4637-49c8-b033-162178e9c4e6","Type":"ContainerStarted","Data":"472f0407bee8d6f0cdc0adc419bb427790d37866be70efee589dfb52bf645426"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.161843 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.163010 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-597mc" event={"ID":"e67bd5e5-a3c9-4576-93e6-6d7073142160","Type":"ContainerStarted","Data":"7e8c00e1d22bcb05f3bb0c40bfcf4c745ce7dbcf2a8fe9c5a632de959accb9d0"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.163825 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" event={"ID":"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e","Type":"ContainerStarted","Data":"0f764586e0d6bf915632adb3816d2847b5a2dc3a21a3851b4b5861cf48af119c"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.179635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" event={"ID":"7c13f08b-2870-484a-a06b-e671feb57ac4","Type":"ContainerStarted","Data":"b646afd9c46ed02427c6142bcb0b90c40289e89566f0952236bea38f460994ec"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.180816 4926 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-9mnkw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.180864 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.189354 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" 
event={"ID":"68b9c21a-f80e-4ae9-8bbe-63c3af244602","Type":"ContainerStarted","Data":"8184cd54f9c29ae127069d12d0afe1b210f38e528b4c92c89d6ba208a9c0823a"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.194802 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.218168 4926 generic.go:334] "Generic (PLEG): container finished" podID="900fd29e-9f47-40d8-b232-fca71cd10642" containerID="1b8e14aac7039a649df1b92d873cf9a4668ab61cb6334ba3033b191dd2ef3196" exitCode=0 Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.218496 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" event={"ID":"900fd29e-9f47-40d8-b232-fca71cd10642","Type":"ContainerDied","Data":"1b8e14aac7039a649df1b92d873cf9a4668ab61cb6334ba3033b191dd2ef3196"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.237048 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.237707 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-l89wr" event={"ID":"160e9743-11c4-4436-a4e0-6c757c2e35ea","Type":"ContainerStarted","Data":"dd077725c7314f2949e9661eae39a089c4faaf27935cebf152f0684bb2b336dc"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.238806 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.239780 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.739762135 +0000 UTC m=+143.125275920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.240981 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-p8xd7" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.243094 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" event={"ID":"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8","Type":"ContainerStarted","Data":"4e9bf9b194ea32bac2532a3b6fff2eeddd378635dd166d1babd1d569cab33040"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.244843 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" event={"ID":"0dc9e969-666a-4b3b-ab6d-4b503e54c481","Type":"ContainerStarted","Data":"88049c34965615ddbc1b1425495aef30cfc67cb0b22fae8e72a48ed7548c2658"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.253838 4926 patch_prober.go:28] interesting pod/console-operator-58897d9998-l89wr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.253898 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-l89wr" podUID="160e9743-11c4-4436-a4e0-6c757c2e35ea" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.261154 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" event={"ID":"b6560e66-aef6-4fd2-b808-4bdfaad6b992","Type":"ContainerStarted","Data":"6a6ab94847dc7c653e0540ee93c87294a9e41093d690686f420ba66e5f7139f7"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.274942 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" event={"ID":"36fc28b4-ce41-44b8-a384-c5434107c068","Type":"ContainerStarted","Data":"8317a05d7c22dfc018fa30577baa355817d8c1b27909d3854a4f2cbfbda0d65d"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.274977 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" event={"ID":"36fc28b4-ce41-44b8-a384-c5434107c068","Type":"ContainerStarted","Data":"72dbb79fd944ee81e69e6775fa28a79d8d867f075262aba8eec8c32bb2436e63"} Nov 25 18:15:12 crc kubenswrapper[4926]: W1125 18:15:12.299026 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00045d3a_a833_44a7_87db_45cf8cfb26d1.slice/crio-37a3d42cccfa53e8fc5309d6b7bca2a6d9d53a4d15908cf122a79ed42fb9d521 WatchSource:0}: Error finding container 37a3d42cccfa53e8fc5309d6b7bca2a6d9d53a4d15908cf122a79ed42fb9d521: Status 404 returned error can't find the container with id 37a3d42cccfa53e8fc5309d6b7bca2a6d9d53a4d15908cf122a79ed42fb9d521 Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.307899 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" event={"ID":"21ab4d7d-54a0-4c82-b742-a2e65d689b18","Type":"ContainerStarted","Data":"07d49ef588342d26c8ea3407a1278be4163550f968d4f9a92b73c2039d84589f"} Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.309199 4926 patch_prober.go:28] interesting 
pod/controller-manager-879f6c89f-gzmf7 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.309245 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.340017 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.341082 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.841064487 +0000 UTC m=+143.226578102 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.359847 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.441617 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.441684 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.442061 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:12.942042108 +0000 UTC m=+143.327555713 (durationBeforeRetry 500ms). 
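
Annotation: the "Probe failed" records just above show the kubelet's HTTP readiness prober getting "connect: connection refused" because the controller-manager container is not yet listening on 10.217.0.11:8443. Below is a minimal Go sketch of such an HTTPS readiness check; it is illustrative only, not the kubelet's actual prober code, and the 1-second timeout and skip-verify transport are assumptions. The in-flight mount-retry record resumes with its Error detail after the sketch.

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeReadiness performs one HTTPS GET against a pod's readiness
// endpoint. Any transport error (e.g. "connect: connection refused",
// as in the log) or a status outside 200-399 counts as a failure,
// matching how HTTP probes are evaluated.
func probeReadiness(url string) error {
	client := &http.Client{
		Timeout: 1 * time.Second, // assumption: typical probe timeout
		Transport: &http.Transport{
			// Pod readiness endpoints commonly present self-signed certs.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeReadiness("https://10.217.0.11:8443/healthz"); err != nil {
		fmt.Println(err) // prints the connection-refused failure seen above
	}
}
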
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.488135 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.548190 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.548723 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.048703225 +0000 UTC m=+143.434216830 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: W1125 18:15:12.550739 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa2957da_0094_474f_809f_5fbce73202c6.slice/crio-590da15bbb5000890d074c76b55668956e5108f80f6e6a16691eed9821ff738b WatchSource:0}: Error finding container 590da15bbb5000890d074c76b55668956e5108f80f6e6a16691eed9821ff738b: Status 404 returned error can't find the container with id 590da15bbb5000890d074c76b55668956e5108f80f6e6a16691eed9821ff738b Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.650390 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.651326 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.151311814 +0000 UTC m=+143.536825419 (durationBeforeRetry 500ms). 
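
Annotation: every Mount/Unmount failure in this stream shares one root cause, "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers". The kubelet keeps an in-memory map of CSI drivers that have registered over its plugin-registration socket, and volume operations fail fast until the driver's node plugin registers. The sketch below shows that lookup pattern; the type, method names, and socket path are illustrative, not the kubelet's actual csi_plugin.go internals. The log stream resumes after the sketch.

package main

import (
	"fmt"
	"sync"
)

// csiDriverRegistry mimics the kubelet's list of registered CSI drivers:
// node plugins register over the plugin-registration socket, and each
// volume operation looks the driver up by name before any gRPC call.
type csiDriverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> unix socket endpoint
}

func (r *csiDriverRegistry) Register(name, endpoint string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.drivers[name] = endpoint
}

// ClientFor fails in exactly the way the log records above do when the
// driver's node plugin has not registered yet.
func (r *csiDriverRegistry) ClientFor(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := &csiDriverRegistry{drivers: map[string]string{}}
	if _, err := reg.ClientFor("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println(err) // the failure mode filling this log
	}
	// Once the node plugin registers, the same lookup succeeds and the
	// pending mount/unmount retries can make progress.
	reg.Register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock") // path is illustrative
	ep, _ := reg.ClientFor("kubevirt.io.hostpath-provisioner")
	fmt.Println("endpoint:", ep)
}
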
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.682876 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.751763 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.751883 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.251851993 +0000 UTC m=+143.637365598 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.752403 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.752766 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.25275562 +0000 UTC m=+143.638269225 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.810153 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wrv6b" podStartSLOduration=123.810137726 podStartE2EDuration="2m3.810137726s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:12.783529583 +0000 UTC m=+143.169043188" watchObservedRunningTime="2025-11-25 18:15:12.810137726 +0000 UTC m=+143.195651321" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.811138 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.856066 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.856313 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.356267125 +0000 UTC m=+143.741780740 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.856567 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.857178 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.357168961 +0000 UTC m=+143.742682566 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.893040 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-l89wr" podStartSLOduration=123.893019642 podStartE2EDuration="2m3.893019642s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:12.865990247 +0000 UTC m=+143.251503852" watchObservedRunningTime="2025-11-25 18:15:12.893019642 +0000 UTC m=+143.278533247" Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.893637 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h"] Nov 25 18:15:12 crc kubenswrapper[4926]: I1125 18:15:12.959949 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:12 crc kubenswrapper[4926]: E1125 18:15:12.960445 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.46043083 +0000 UTC m=+143.845944435 (durationBeforeRetry 500ms). 
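
Annotation: the recurring "No retries permitted until <t> ... (durationBeforeRetry 500ms)" lines come from the kubelet's nested pending-operations layer, which serializes operations per volume and gates retries behind a backoff window after each error. The sketch below shows that gating logic; the 500ms initial window matches the log, while the doubling factor and the 2m2s cap are assumptions about the general exponential-backoff mechanism (here the log keeps reporting 500ms). The Error detail of the retry record in progress follows the sketch.

package main

import (
	"fmt"
	"time"
)

// expBackoff gates retries of a failed volume operation, in the spirit
// of the "No retries permitted until ..." messages above.
type expBackoff struct {
	lastError time.Time
	duration  time.Duration
}

const (
	initialBackoff = 500 * time.Millisecond       // matches durationBeforeRetry 500ms
	backoffFactor  = 2                            // assumption
	maxBackoff     = 2*time.Minute + 2*time.Second // assumption
)

// recordError bumps the backoff window after a failure.
func (b *expBackoff) recordError(now time.Time) {
	b.lastError = now
	if b.duration == 0 {
		b.duration = initialBackoff
	} else if b.duration < maxBackoff {
		b.duration *= backoffFactor
	}
}

// allowed reports whether a retry may start now, and if not, the
// earliest time it may, i.e. the timestamp the kubelet logs.
func (b *expBackoff) allowed(now time.Time) (bool, time.Time) {
	next := b.lastError.Add(b.duration)
	return now.After(next), next
}

func main() {
	var b expBackoff
	b.recordError(time.Now())
	ok, next := b.allowed(time.Now())
	fmt.Printf("retry allowed=%v, no retries permitted until %s\n", ok, next.Format(time.RFC3339Nano))
}
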
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.008960 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-tfggb"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.019257 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-bjxzl"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.039211 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.065867 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.066704 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.566683395 +0000 UTC m=+143.952197000 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.069626 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-tfmxn" podStartSLOduration=123.069605889 podStartE2EDuration="2m3.069605889s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.066729986 +0000 UTC m=+143.452243591" watchObservedRunningTime="2025-11-25 18:15:13.069605889 +0000 UTC m=+143.455119494" Nov 25 18:15:13 crc kubenswrapper[4926]: W1125 18:15:13.140958 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf77438bb_ad38_4daa_ba55_108543030e57.slice/crio-686e337c8855a9e5b0c70227d83d14bfcd061d2091fb6fe5c336d8a736e03146 WatchSource:0}: Error finding container 686e337c8855a9e5b0c70227d83d14bfcd061d2091fb6fe5c336d8a736e03146: Status 404 returned error can't find the container with id 686e337c8855a9e5b0c70227d83d14bfcd061d2091fb6fe5c336d8a736e03146 Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.155855 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-dxszx" podStartSLOduration=123.155794782 podStartE2EDuration="2m3.155794782s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.114258506 +0000 UTC m=+143.499772121" watchObservedRunningTime="2025-11-25 18:15:13.155794782 +0000 UTC m=+143.541308387" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.156999 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4" podStartSLOduration=123.156988696 podStartE2EDuration="2m3.156988696s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.151143627 +0000 UTC m=+143.536657232" watchObservedRunningTime="2025-11-25 18:15:13.156988696 +0000 UTC m=+143.542502301" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.161341 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-cfm2d"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.167415 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.167913 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.667890863 +0000 UTC m=+144.053404468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.179807 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" podStartSLOduration=123.179780588 podStartE2EDuration="2m3.179780588s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.178531972 +0000 UTC m=+143.564045597" watchObservedRunningTime="2025-11-25 18:15:13.179780588 +0000 UTC m=+143.565294193" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.233614 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" podStartSLOduration=123.23359227 podStartE2EDuration="2m3.23359227s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.226242938 +0000 UTC m=+143.611756563" watchObservedRunningTime="2025-11-25 18:15:13.23359227 +0000 UTC m=+143.619105875" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.269169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.269546 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.769534414 +0000 UTC m=+144.155048019 (durationBeforeRetry 500ms). 
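
Annotation: the "Observed pod startup duration" records above report podStartSLOduration as the time from pod creation to the kubelet observing the pod running, minus any observed image-pull time; the zero-value pull timestamps ("0001-01-01 00:00:00 +0000 UTC") mean no pull was observed, so pulls contribute nothing here. A small sketch of that arithmetic follows (field names are illustrative, not the tracker's actual struct); the log stream resumes after it.

package main

import (
	"fmt"
	"time"
)

// startupRecord mirrors the fields printed by the
// "Observed pod startup duration" log lines above.
type startupRecord struct {
	created             time.Time
	firstStartedPulling time.Time // zero value => no image pull observed
	lastFinishedPulling time.Time
	observedRunning     time.Time
}

// sloDuration reproduces podStartSLOduration: creation to observed
// running, excluding the image-pull window when one was observed.
func (r startupRecord) sloDuration() time.Duration {
	d := r.observedRunning.Sub(r.created)
	if !r.firstStartedPulling.IsZero() {
		d -= r.lastFinishedPulling.Sub(r.firstStartedPulling)
	}
	return d
}

func main() {
	// Values taken from the route-controller-manager record above.
	created, _ := time.Parse(time.RFC3339, "2025-11-25T18:13:10Z")
	running, _ := time.Parse(time.RFC3339Nano, "2025-11-25T18:15:13.178531972Z")
	r := startupRecord{created: created, observedRunning: running}
	fmt.Println(r.sloDuration()) // ~2m3.17s, matching podStartSLOduration=123.17...
}
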
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: W1125 18:15:13.337366 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a3b0d5b_a479_44ec_8144_630698bb2792.slice/crio-50f7c3fedad05daa8c512d0a9694330ab81bed3d8dc1d4470aa9537c0d365460 WatchSource:0}: Error finding container 50f7c3fedad05daa8c512d0a9694330ab81bed3d8dc1d4470aa9537c0d365460: Status 404 returned error can't find the container with id 50f7c3fedad05daa8c512d0a9694330ab81bed3d8dc1d4470aa9537c0d365460 Nov 25 18:15:13 crc kubenswrapper[4926]: W1125 18:15:13.376966 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod455c82d6_6c13_4315_9610_a50e40fb528f.slice/crio-5d8bdf463304ce1925a3e37d52ee47c2ddf9636e6c95f99689b4b81b3daf02b0 WatchSource:0}: Error finding container 5d8bdf463304ce1925a3e37d52ee47c2ddf9636e6c95f99689b4b81b3daf02b0: Status 404 returned error can't find the container with id 5d8bdf463304ce1925a3e37d52ee47c2ddf9636e6c95f99689b4b81b3daf02b0 Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.387038 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.387703 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:13.887686484 +0000 UTC m=+144.273200089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.491596 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.492077 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
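
Annotation: the "Failed to process watch event ... can't find the container with id ..." warnings above are a startup race in the cAdvisor-side container manager: a cgroup watch event arrives for a new crio-<id> slice, but the runtime does not yet report that container, so the lookup returns a 404 and the event is dropped; the container is picked up on a later housekeeping pass. A sketch of tolerating that race follows (types and names are illustrative); the log stream resumes after it.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("can't find the container with id")

// inspect stands in for the runtime lookup that returned "Status 404"
// in the warnings above; here it always fails, to show the handling.
func inspect(id string) error { return errNotFound }

// handleWatchEvent logs and drops not-found containers instead of
// failing hard: the next periodic housekeeping pass retries discovery
// once the runtime actually reports the container.
func handleWatchEvent(id string) {
	if err := inspect(id); err != nil {
		if errors.Is(err, errNotFound) {
			fmt.Printf("W: failed to process watch event for %s: %v (retried on next housekeeping)\n", id, err)
			return
		}
		// Unexpected errors would be surfaced rather than swallowed.
	}
}

func main() {
	handleWatchEvent("50f7c3fedad05daa8c512d0a9694330ab81bed3d8dc1d4470aa9537c0d365460")
}
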
No retries permitted until 2025-11-25 18:15:13.992061776 +0000 UTC m=+144.377575381 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.507918 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" event={"ID":"3e2712fd-5e82-4d9a-95c8-07943da9ef18","Type":"ContainerStarted","Data":"a8f11b1ab4d81d2ac89685e041ff2e0cefaf6b33abb150a8623c45688607b0c9"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.514008 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r4ssr" event={"ID":"00045d3a-a833-44a7-87db-45cf8cfb26d1","Type":"ContainerStarted","Data":"37a3d42cccfa53e8fc5309d6b7bca2a6d9d53a4d15908cf122a79ed42fb9d521"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.524614 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-597mc" event={"ID":"e67bd5e5-a3c9-4576-93e6-6d7073142160","Type":"ContainerStarted","Data":"1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.536255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-npw6k" event={"ID":"9bd152c8-7711-488d-a764-d8ffefe66454","Type":"ContainerStarted","Data":"83f8ab0570cf16df43935c3970d3c01e50802338229bab8b55f17641605fa0e9"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.540167 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-h8x65" podStartSLOduration=123.540132921 podStartE2EDuration="2m3.540132921s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.535498506 +0000 UTC m=+143.921012131" watchObservedRunningTime="2025-11-25 18:15:13.540132921 +0000 UTC m=+143.925646526" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.546687 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" event={"ID":"aa2957da-0094-474f-809f-5fbce73202c6","Type":"ContainerStarted","Data":"590da15bbb5000890d074c76b55668956e5108f80f6e6a16691eed9821ff738b"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.574330 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-p5t89" event={"ID":"864adf3d-a017-4eac-944b-5aced3d2d765","Type":"ContainerStarted","Data":"5f8cd3486dc6638ef9b5f937948bfed9dee030715d7cb703f26e95f0b9d093cc"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.595294 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.597394 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.097356352 +0000 UTC m=+144.482870127 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.607203 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" event={"ID":"3aa5b308-f022-46f4-8952-a2fa8815572c","Type":"ContainerStarted","Data":"50c8f6546c0b7702ab5b9d08a0549825991d647422e0c63ac97c5b1c7406eb28"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.614561 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" event={"ID":"3e21465b-c285-4623-9566-f4998c280e16","Type":"ContainerStarted","Data":"fa25fa511c2350763f1a0722695d73884c9179b95e5d60ca7671f51f4de3bca1"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.621609 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nqxtc"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.637817 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" event={"ID":"f77438bb-ad38-4daa-ba55-108543030e57","Type":"ContainerStarted","Data":"686e337c8855a9e5b0c70227d83d14bfcd061d2091fb6fe5c336d8a736e03146"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.639685 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.655454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" event={"ID":"8fd6e266-93f5-4345-914e-b10b90cd4378","Type":"ContainerStarted","Data":"2d6a020cf07cbf1d9eb1e48e84633f1ee088ce5d7ad682105daaf7b9bbbafcf7"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.675212 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s2nd6" podStartSLOduration=123.675189332 podStartE2EDuration="2m3.675189332s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.672076432 +0000 UTC m=+144.057590037" watchObservedRunningTime="2025-11-25 18:15:13.675189332 +0000 UTC m=+144.060702937" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.683566 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" 
event={"ID":"dd6cda0e-b3b1-41d3-b01d-e14d84508259","Type":"ContainerStarted","Data":"94603cdae4cc90bbd4203116c46884b88d3b52a251db8426c0ed45d99d45de69"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.687155 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" event={"ID":"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b","Type":"ContainerStarted","Data":"3090c75b7cf776d1590e861300ced7b9c9b05059d4e9e81c0b906cc5f7d0628d"} Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.705700 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.706253 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.206180492 +0000 UTC m=+144.591694097 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: W1125 18:15:13.707015 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca675b57_be0b_4dd2_9d94_7f25262c885d.slice/crio-c9a5b2a5e48866efb4d2c6d4a4942a8647950ddeeff4f0458b0455e8d0e30c40 WatchSource:0}: Error finding container c9a5b2a5e48866efb4d2c6d4a4942a8647950ddeeff4f0458b0455e8d0e30c40: Status 404 returned error can't find the container with id c9a5b2a5e48866efb4d2c6d4a4942a8647950ddeeff4f0458b0455e8d0e30c40 Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.760685 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-52bgr" podStartSLOduration=124.755526905 podStartE2EDuration="2m4.755526905s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.753494236 +0000 UTC m=+144.139007861" watchObservedRunningTime="2025-11-25 18:15:13.755526905 +0000 UTC m=+144.141040510" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.809960 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.811852 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.31182687 +0000 UTC m=+144.697340465 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.851555 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m6qg7"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.895130 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-hmfpp" podStartSLOduration=123.895112277 podStartE2EDuration="2m3.895112277s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.857009231 +0000 UTC m=+144.242522846" watchObservedRunningTime="2025-11-25 18:15:13.895112277 +0000 UTC m=+144.280625882" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.900257 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-l89wr" Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.911915 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:13 crc kubenswrapper[4926]: E1125 18:15:13.912342 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.412329828 +0000 UTC m=+144.797843433 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.970327 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dzlmz"] Nov 25 18:15:13 crc kubenswrapper[4926]: I1125 18:15:13.992639 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7" podStartSLOduration=124.992618378 podStartE2EDuration="2m4.992618378s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:13.991804015 +0000 UTC m=+144.377317620" watchObservedRunningTime="2025-11-25 18:15:13.992618378 +0000 UTC m=+144.378131983" Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.009259 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs"] Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.017037 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.017621 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.517597944 +0000 UTC m=+144.903111549 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.089680 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt"]
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.124202 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.124617 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.62460016 +0000 UTC m=+145.010113765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.130239 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.233676 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.234425 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.734401419 +0000 UTC m=+145.119915024 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.304675 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-p8xd7"]
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.357110 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.357802 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.857787752 +0000 UTC m=+145.243301357 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.458502 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-597mc" podStartSLOduration=125.458488635 podStartE2EDuration="2m5.458488635s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:14.389296386 +0000 UTC m=+144.774809991" watchObservedRunningTime="2025-11-25 18:15:14.458488635 +0000 UTC m=+144.844002240"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.458792 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.459324 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:14.959297949 +0000 UTC m=+145.344811554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.561044 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.561716 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.061699051 +0000 UTC m=+145.447212656 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.577597 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" podStartSLOduration=14.577565562 podStartE2EDuration="14.577565562s" podCreationTimestamp="2025-11-25 18:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:14.576923884 +0000 UTC m=+144.962437509" watchObservedRunningTime="2025-11-25 18:15:14.577565562 +0000 UTC m=+144.963079167"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.663694 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.664493 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.164475156 +0000 UTC m=+145.549988761 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.748635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" event={"ID":"d189b2f0-78ae-4cfb-8965-eb98399c8de8","Type":"ContainerStarted","Data":"03499b6019754aafc78a88bf931a14a57543bd8711e2afd4f6c34052f8355f6d"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.775332 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.775761 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.275746437 +0000 UTC m=+145.661260042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.788019 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" event={"ID":"3aa5b308-f022-46f4-8952-a2fa8815572c","Type":"ContainerStarted","Data":"a2bc093056890947bed072f102e8261c5c3de8cbcc957634298290f8b73ae7bc"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.798224 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.808303 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" event={"ID":"ee4dcba5-cece-4763-93e2-3e08ab0b883b","Type":"ContainerStarted","Data":"321b64305d078e78f34b2ef6f96a3a9865e69a1685988a5aefa4c26004286028"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.830926 4926 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-xjm9m container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" start-of-body=
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.830991 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" podUID="3aa5b308-f022-46f4-8952-a2fa8815572c" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.831225 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" podStartSLOduration=124.831197457 podStartE2EDuration="2m4.831197457s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:14.830818526 +0000 UTC m=+145.216332131" watchObservedRunningTime="2025-11-25 18:15:14.831197457 +0000 UTC m=+145.216711062"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.876658 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.877005 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.376981706 +0000 UTC m=+145.762495311 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.877045 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.878128 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.378113488 +0000 UTC m=+145.763627093 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.883828 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" event={"ID":"3e2712fd-5e82-4d9a-95c8-07943da9ef18","Type":"ContainerStarted","Data":"b0cb4befc1f716bcd3f3ab72b4208ca26fc01456e5798ac9bd506a519c07dc34"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.896703 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" event={"ID":"b6560e66-aef6-4fd2-b808-4bdfaad6b992","Type":"ContainerStarted","Data":"2361a082ccf88b5a14c2b5d5ab11d7acf571ffa5267e3ae4f5325856560693ab"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.898752 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.898803 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.908249 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" event={"ID":"8fd6e266-93f5-4345-914e-b10b90cd4378","Type":"ContainerStarted","Data":"8a60e9e10e78ab70b5a29f9f7575c827b5ed2d942046d15c4d5b78aa61a8e9f1"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.919078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" event={"ID":"f77438bb-ad38-4daa-ba55-108543030e57","Type":"ContainerStarted","Data":"cf2d0f97cad00832dd288dc80d3000e14aac9b0d629e98aa436ebd8158a9a1e0"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.939598 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" event={"ID":"6f6bf9f0-4263-465e-bb8c-36e52edbaa3e","Type":"ContainerStarted","Data":"55e2ae310ad5861b7d8e300568fbecc45f2046f8765f26f8f9c37888f6fca8c8"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.953397 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xx679" podStartSLOduration=124.953352233 podStartE2EDuration="2m4.953352233s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:14.952488748 +0000 UTC m=+145.338002353" watchObservedRunningTime="2025-11-25 18:15:14.953352233 +0000 UTC m=+145.338865838"
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.973729 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" event={"ID":"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f","Type":"ContainerStarted","Data":"e60d52f5425e271f3ed7aebeacce824d322c0976601290d627e85be4a440e43f"}
Nov 25 18:15:14 crc kubenswrapper[4926]: I1125 18:15:14.978550 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:14 crc kubenswrapper[4926]: E1125 18:15:14.979245 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.479209963 +0000 UTC m=+145.864723568 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.050064 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-npw6k" event={"ID":"9bd152c8-7711-488d-a764-d8ffefe66454","Type":"ContainerStarted","Data":"864ac90cdceedf0d7616b0d2f211a42f924e0d9c055896c73ac323cfa8318f9c"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.063311 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" event={"ID":"aa2957da-0094-474f-809f-5fbce73202c6","Type":"ContainerStarted","Data":"21dee06787e28f50af08d3a19ab299f73571097c12ff66b1a6219d2e13e91f5d"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.064066 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.070408 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" event={"ID":"dd6cda0e-b3b1-41d3-b01d-e14d84508259","Type":"ContainerStarted","Data":"1223fdf611c039c7af70667454cfdbe4949d139ae0b6cdf389127f0d87095513"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.076491 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.080363 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.089981 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.589930759 +0000 UTC m=+145.975444364 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.100891 4926 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-gkjwp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body=
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.100945 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" podUID="dd6cda0e-b3b1-41d3-b01d-e14d84508259" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.101135 4926 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-ftsfp container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.101209 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" podUID="aa2957da-0094-474f-809f-5fbce73202c6" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.103612 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jdqpq" podStartSLOduration=125.103600426 podStartE2EDuration="2m5.103600426s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.014811197 +0000 UTC m=+145.400324822" watchObservedRunningTime="2025-11-25 18:15:15.103600426 +0000 UTC m=+145.489114031"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.103825 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-npw6k" podStartSLOduration=6.103820262 podStartE2EDuration="6.103820262s" podCreationTimestamp="2025-11-25 18:15:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.101584767 +0000 UTC m=+145.487098372" watchObservedRunningTime="2025-11-25 18:15:15.103820262 +0000 UTC m=+145.489333867"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.132300 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" event={"ID":"ca675b57-be0b-4dd2-9d94-7f25262c885d","Type":"ContainerStarted","Data":"c9a5b2a5e48866efb4d2c6d4a4942a8647950ddeeff4f0458b0455e8d0e30c40"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.133772 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" event={"ID":"0dc9e969-666a-4b3b-ab6d-4b503e54c481","Type":"ContainerStarted","Data":"f5e0f755fe558817dd42d6120c9e27e340f5342718df835fa839adf3f6debfb2"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.135568 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-p8xd7" event={"ID":"d5d6426f-e798-4091-8a51-7ad22cff4892","Type":"ContainerStarted","Data":"96d15df27ad5156c5b12f3db01780c565a063772b28660096fc50a2fe54f4124"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.159383 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" event={"ID":"68b9c21a-f80e-4ae9-8bbe-63c3af244602","Type":"ContainerStarted","Data":"ebeec24e265dec55fe38730ddcf4b0e705818758d71e01e4d610d1138762cba9"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.169271 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.175629 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-cfm2d" event={"ID":"6627639e-ac72-44d9-a2f5-837d5244688c","Type":"ContainerStarted","Data":"08ed79b5228fab0af0230312db0434b6b295ea1b3afe27553a2aa35fbbb558b8"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.191076 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.191733 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.691703183 +0000 UTC m=+146.077216788 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.218817 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" event={"ID":"67bbfb91-6c12-4b68-9a20-33f62381f57f","Type":"ContainerStarted","Data":"ae3d7a412349c866d979e8d28ae23f9d5eb449e7da4f2f3c22bfed892164c865"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.233504 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" event={"ID":"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8","Type":"ContainerStarted","Data":"16f36fbeb07bd9ad5d9e3fb03a4ba14218c3903b027e1568fca6742928bf15b7"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.242393 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" event={"ID":"3e21465b-c285-4623-9566-f4998c280e16","Type":"ContainerStarted","Data":"2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.243254 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.250695 4926 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-w7m5b container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.25:6443/healthz\": dial tcp 10.217.0.25:6443: connect: connection refused" start-of-body=
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.250778 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" podUID="3e21465b-c285-4623-9566-f4998c280e16" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.25:6443/healthz\": dial tcp 10.217.0.25:6443: connect: connection refused"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.268733 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp" podStartSLOduration=125.26870672 podStartE2EDuration="2m5.26870672s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.196134712 +0000 UTC m=+145.581648317" watchObservedRunningTime="2025-11-25 18:15:15.26870672 +0000 UTC m=+145.654220325"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.268847 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp" podStartSLOduration=125.268842613 podStartE2EDuration="2m5.268842613s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.147434189 +0000 UTC m=+145.532947794" watchObservedRunningTime="2025-11-25 18:15:15.268842613 +0000 UTC m=+145.654356218"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.294679 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.297656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" event={"ID":"455c82d6-6c13-4315-9610-a50e40fb528f","Type":"ContainerStarted","Data":"5d8bdf463304ce1925a3e37d52ee47c2ddf9636e6c95f99689b4b81b3daf02b0"}
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.298571 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.798556776 +0000 UTC m=+146.184070371 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.307662 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pt8g4" podStartSLOduration=125.30764571 podStartE2EDuration="2m5.30764571s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.305675563 +0000 UTC m=+145.691189168" watchObservedRunningTime="2025-11-25 18:15:15.30764571 +0000 UTC m=+145.693159315"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.319618 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" event={"ID":"c0eb0280-e40f-4372-9b58-ee586e7cf494","Type":"ContainerStarted","Data":"a3d95ca2347756fd610d0479d73a67698ef8a1e02db8bd9d4d94bca5a38bf763"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.337473 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r4ssr" event={"ID":"00045d3a-a833-44a7-87db-45cf8cfb26d1","Type":"ContainerStarted","Data":"9f4bd3f85521d539cfd49cf733fd9785b4e1de75aab4894776259ed47692a655"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.339265 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5z2zh" podStartSLOduration=125.339252578 podStartE2EDuration="2m5.339252578s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.33828828 +0000 UTC m=+145.723801905" watchObservedRunningTime="2025-11-25 18:15:15.339252578 +0000 UTC m=+145.724766183"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.341835 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-r4ssr"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.346984 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.347024 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.402062 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-r4ssr" podStartSLOduration=126.4020462 podStartE2EDuration="2m6.4020462s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.401044202 +0000 UTC m=+145.786557807" watchObservedRunningTime="2025-11-25 18:15:15.4020462 +0000 UTC m=+145.787559805"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.403187 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.403438 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.90340826 +0000 UTC m=+146.288921865 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.403508 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.404358 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:15.904347637 +0000 UTC m=+146.289861242 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.418295 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" event={"ID":"9a3b0d5b-a479-44ec-8144-630698bb2792","Type":"ContainerStarted","Data":"50f7c3fedad05daa8c512d0a9694330ab81bed3d8dc1d4470aa9537c0d365460"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.451188 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" podStartSLOduration=125.451169937 podStartE2EDuration="2m5.451169937s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.449911841 +0000 UTC m=+145.835425446" watchObservedRunningTime="2025-11-25 18:15:15.451169937 +0000 UTC m=+145.836683542"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.478601 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" podStartSLOduration=125.478573613 podStartE2EDuration="2m5.478573613s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.477722338 +0000 UTC m=+145.863235963" watchObservedRunningTime="2025-11-25 18:15:15.478573613 +0000 UTC m=+145.864087218"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.480205 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" event={"ID":"98b3f18b-4b82-4b50-ac67-31ace23273f3","Type":"ContainerStarted","Data":"e2cae954e8147dbe0c1ce277c560ba0bc8dcff9246d3f668f9b8bcf7fa18a17d"}
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.499030 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-flsl4"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.507101 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.508384 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.008346817 +0000 UTC m=+146.393860432 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.600826 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" podStartSLOduration=126.600800901 podStartE2EDuration="2m6.600800901s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:15.549399279 +0000 UTC m=+145.934912894" watchObservedRunningTime="2025-11-25 18:15:15.600800901 +0000 UTC m=+145.986314516"
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.615762 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.616147 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.116132377 +0000 UTC m=+146.501645982 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.717104 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.717611 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.217589792 +0000 UTC m=+146.603103397 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.821468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.821940 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.321921031 +0000 UTC m=+146.707434636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.924068 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:15 crc kubenswrapper[4926]: E1125 18:15:15.924364 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.424349136 +0000 UTC m=+146.809862741 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:15 crc kubenswrapper[4926]: I1125 18:15:15.968976 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hpnj7"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.029723 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.030353 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.530341433 +0000 UTC m=+146.915855038 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.139209 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.140516 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.640499821 +0000 UTC m=+147.026013426 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.248665 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.249023 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.749011682 +0000 UTC m=+147.134525277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.349487 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.372508 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.872477187 +0000 UTC m=+147.257990782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.451978 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.452488 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:16.952471469 +0000 UTC m=+147.337985074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.518658 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" event={"ID":"d189b2f0-78ae-4cfb-8965-eb98399c8de8","Type":"ContainerStarted","Data":"4c3c29b7262dde680953dc93c8c1b8661fcaa51ba6dd526128b67817ad5d9346"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.535483 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" event={"ID":"3e2712fd-5e82-4d9a-95c8-07943da9ef18","Type":"ContainerStarted","Data":"240fab3af214f16ebd4331365f14973b5d30e3e422e15cedb317f1836945b320"}
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.553681 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.053651116 +0000 UTC m=+147.439170562 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.553537 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.553930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.554238 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.054231124 +0000 UTC m=+147.439744729 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.561033 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" event={"ID":"98b3f18b-4b82-4b50-ac67-31ace23273f3","Type":"ContainerStarted","Data":"f0131c2a697e7f2721e78718ac309b7ce6c6379698a368d77da05deea404b604"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.561090 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" event={"ID":"98b3f18b-4b82-4b50-ac67-31ace23273f3","Type":"ContainerStarted","Data":"cb85beba5cea1182d7e577c75839ad0af52cada1ca6056ef7b891d3dae439a94"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.580662 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x5l94" event={"ID":"455c82d6-6c13-4315-9610-a50e40fb528f","Type":"ContainerStarted","Data":"bd310ca42b87a3fb24a8093e11d79e621373de7fa7a12e7d6e6684d9cfc37cdd"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.592416 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-bglkt" podStartSLOduration=126.592400272 podStartE2EDuration="2m6.592400272s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.553640216 +0000 UTC m=+146.939153821" watchObservedRunningTime="2025-11-25 18:15:16.592400272 +0000 UTC m=+146.977913867"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.592993 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vhlmb" podStartSLOduration=126.592987449 podStartE2EDuration="2m6.592987449s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.591833005 +0000 UTC m=+146.977346610" watchObservedRunningTime="2025-11-25 18:15:16.592987449 +0000 UTC m=+146.978501044"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.593323 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-tfggb" event={"ID":"c0eb0280-e40f-4372-9b58-ee586e7cf494","Type":"ContainerStarted","Data":"add273085609ee6aa5dbcb69b481af65ad96c38877fd22306c7fa0f7a46fafc1"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.612970 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" event={"ID":"8fd6f0a5-689c-4217-9730-6fdbaa3bcf1b","Type":"ContainerStarted","Data":"293df5de9493571e40d3c2fcc3111b9e46a481fea31570be9a9ae882aa2b0819"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.613707 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.636172 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" event={"ID":"9a3b0d5b-a479-44ec-8144-630698bb2792","Type":"ContainerStarted","Data":"c8da99461848d69cceaf89e20606bce134bb2a1eb56879b4deae8f00770803c2"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.636651 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" event={"ID":"9a3b0d5b-a479-44ec-8144-630698bb2792","Type":"ContainerStarted","Data":"f120928805fd322819b31af3fa5b0494e0e50a937026df9c41ef7b4ae370384e"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.639594 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" event={"ID":"a3fbc7b8-0f81-44b9-8890-5f6c124f2fa8","Type":"ContainerStarted","Data":"d0bffdb5988afae6779eaa22e514d9b0f97773a7c029950d026a6af73f55c333"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.644668 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zb4gn" podStartSLOduration=126.644652729 podStartE2EDuration="2m6.644652729s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.644083503 +0000 UTC m=+147.029597118" watchObservedRunningTime="2025-11-25 18:15:16.644652729 +0000 UTC m=+147.030166334"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.660464 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.661780 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.161761946 +0000 UTC m=+147.547275551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.663058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" event={"ID":"f77438bb-ad38-4daa-ba55-108543030e57","Type":"ContainerStarted","Data":"40df2848b3ef3bdba6cbbebc63a9bcbf932122bda406a4eb33f8e8758a9fa477"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.677520 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" event={"ID":"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f","Type":"ContainerStarted","Data":"3b259915f2d0cb1192c24f68f34b665fc3982b67a6b747f7269c9940563312de"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.678494 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.681856 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-p5t89" event={"ID":"864adf3d-a017-4eac-944b-5aced3d2d765","Type":"ContainerStarted","Data":"5cc2528c0f8f4587fce2cb9afe693a38fb9c7e5b6443235e99fe120d972d19fa"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.685274 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-m6qg7 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.685320 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.688246 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-cfm2d" event={"ID":"6627639e-ac72-44d9-a2f5-837d5244688c","Type":"ContainerStarted","Data":"6a59634861959e8dc24a0845196b04fde40a18c1a3de17e3d45f6eb9918af8a3"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.688293 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-cfm2d" event={"ID":"6627639e-ac72-44d9-a2f5-837d5244688c","Type":"ContainerStarted","Data":"36f4db922dde3ff0a40e8966831413144261df01218e6fad58a40ee339971137"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.688787 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-cfm2d"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.690079 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" event={"ID":"67bbfb91-6c12-4b68-9a20-33f62381f57f","Type":"ContainerStarted","Data":"0cd5c887b79ad11bc9dc4c350eaf3e45686e1117699012f48deb43f190e2c627"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.702200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" event={"ID":"900fd29e-9f47-40d8-b232-fca71cd10642","Type":"ContainerStarted","Data":"00c1ce7d325c669ef50751b2c5302e7bcf2b7ba0865758b9d63664b8f9809b9b"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.702281 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" event={"ID":"900fd29e-9f47-40d8-b232-fca71cd10642","Type":"ContainerStarted","Data":"9435cbb74905b6ef93c6b440d7d9dc2e06bd13078770eb2d09f7981918b491a3"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.705076 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-p8xd7" event={"ID":"d5d6426f-e798-4091-8a51-7ad22cff4892","Type":"ContainerStarted","Data":"19887c0711399a73e0897a5ddeba9b468f290879627687e6d1656218534812c6"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.712242 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" event={"ID":"ee4dcba5-cece-4763-93e2-3e08ab0b883b","Type":"ContainerStarted","Data":"7b7fad448731354fb9c4d8253cdd56cf5d74b059b3d667822da0a37dca01c3b6"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.712283 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" event={"ID":"ee4dcba5-cece-4763-93e2-3e08ab0b883b","Type":"ContainerStarted","Data":"d9a534ef4c9677aa978a32ebd3c5305168778001271e41a264442ed57eca8754"}
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.715849 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.716329 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.729931 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gkjwp"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.730773 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ftsfp"
Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.753984 4926
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-bjxzl" podStartSLOduration=126.753967453 podStartE2EDuration="2m6.753967453s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.691011216 +0000 UTC m=+147.076524821" watchObservedRunningTime="2025-11-25 18:15:16.753967453 +0000 UTC m=+147.139481058" Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.754767 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" podStartSLOduration=126.754761526 podStartE2EDuration="2m6.754761526s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.754155458 +0000 UTC m=+147.139669063" watchObservedRunningTime="2025-11-25 18:15:16.754761526 +0000 UTC m=+147.140275131" Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.767041 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.770796 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.270781231 +0000 UTC m=+147.656294846 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.795603 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-xjm9m" Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.814769 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7v5gc" podStartSLOduration=127.814751988 podStartE2EDuration="2m7.814751988s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.814710717 +0000 UTC m=+147.200224332" watchObservedRunningTime="2025-11-25 18:15:16.814751988 +0000 UTC m=+147.200265593" Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.851640 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2g56h" podStartSLOduration=126.851622479 podStartE2EDuration="2m6.851622479s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.84928324 +0000 UTC m=+147.234796855" watchObservedRunningTime="2025-11-25 18:15:16.851622479 +0000 UTC m=+147.237136084" Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.872531 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.874053 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.374035749 +0000 UTC m=+147.759549364 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.911208 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-cfm2d" podStartSLOduration=7.911191478 podStartE2EDuration="7.911191478s" podCreationTimestamp="2025-11-25 18:15:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:16.908285734 +0000 UTC m=+147.293799359" watchObservedRunningTime="2025-11-25 18:15:16.911191478 +0000 UTC m=+147.296705093" Nov 25 18:15:16 crc kubenswrapper[4926]: I1125 18:15:16.974852 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:16 crc kubenswrapper[4926]: E1125 18:15:16.975292 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.475271688 +0000 UTC m=+147.860785454 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.070772 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-dzlmz" podStartSLOduration=128.070752961 podStartE2EDuration="2m8.070752961s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:17.01318703 +0000 UTC m=+147.398700635" watchObservedRunningTime="2025-11-25 18:15:17.070752961 +0000 UTC m=+147.456266566" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.079678 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.080416 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.580385751 +0000 UTC m=+147.965899356 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.106471 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.111598 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:17 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:17 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:17 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.111661 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.190228 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.190618 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.690605381 +0000 UTC m=+148.076118986 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.203076 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sc7cs" podStartSLOduration=127.203058642 podStartE2EDuration="2m7.203058642s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:17.142148954 +0000 UTC m=+147.527662579" watchObservedRunningTime="2025-11-25 18:15:17.203058642 +0000 UTC m=+147.588572247" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.203638 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-p5t89" podStartSLOduration=127.203633339 podStartE2EDuration="2m7.203633339s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:17.201166978 +0000 UTC m=+147.586680593" watchObservedRunningTime="2025-11-25 18:15:17.203633339 +0000 UTC m=+147.589146944" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.223084 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" podStartSLOduration=127.223062193 podStartE2EDuration="2m7.223062193s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:17.22020257 +0000 UTC m=+147.605716175" watchObservedRunningTime="2025-11-25 18:15:17.223062193 +0000 UTC m=+147.608575798" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.262833 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.294130 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.294316 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.294350 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.296541 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.296644 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.796602249 +0000 UTC m=+148.182115854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.303011 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-p8xd7" podStartSLOduration=9.302985184 podStartE2EDuration="9.302985184s" podCreationTimestamp="2025-11-25 18:15:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:17.258027308 +0000 UTC m=+147.643540913" watchObservedRunningTime="2025-11-25 18:15:17.302985184 +0000 UTC m=+147.688498789" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.304757 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podStartSLOduration=128.304750685 podStartE2EDuration="2m8.304750685s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:17.302403277 +0000 UTC m=+147.687916882" watchObservedRunningTime="2025-11-25 18:15:17.304750685 +0000 UTC m=+147.690264290" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.305354 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.396342 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 
18:15:17.396435 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.396491 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.398721 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.898705653 +0000 UTC m=+148.284219258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.408736 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.420745 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.497831 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.498340 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:17.998317215 +0000 UTC m=+148.383830810 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.543944 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.550094 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.576431 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mnpbw"] Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.577704 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.581428 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.593436 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mnpbw"] Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.600326 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.600797 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.10077693 +0000 UTC m=+148.486290535 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.645542 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.702035 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.702319 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-utilities\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.702390 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-catalog-content\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.702470 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcbt9\" (UniqueName: \"kubernetes.io/projected/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-kube-api-access-zcbt9\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.702623 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.202607827 +0000 UTC m=+148.588121432 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.759177 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" event={"ID":"ca675b57-be0b-4dd2-9d94-7f25262c885d","Type":"ContainerStarted","Data":"78786ec84aa42b1b17bdd52d42a0731b9fc158c37f586ec69e74b0e49346cc57"} Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.759227 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" event={"ID":"ca675b57-be0b-4dd2-9d94-7f25262c885d","Type":"ContainerStarted","Data":"3440c06926d737ca7837258d32cdbf112c766c45278811452f2f23cfdec21564"} Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.761330 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-m6qg7 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.761367 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.761554 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.761637 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.779863 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-skkd4"] Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.781054 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.786910 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.803741 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-utilities\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.803788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-catalog-content\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.803840 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcbt9\" (UniqueName: \"kubernetes.io/projected/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-kube-api-access-zcbt9\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.803879 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.804176 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.304162095 +0000 UTC m=+148.689675700 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.804398 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-utilities\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.807748 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-catalog-content\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.807865 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-skkd4"] Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.896156 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcbt9\" (UniqueName: \"kubernetes.io/projected/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-kube-api-access-zcbt9\") pod \"certified-operators-mnpbw\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") " pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.908690 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mnpbw" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.911118 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.911391 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-catalog-content\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.911536 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64msz\" (UniqueName: \"kubernetes.io/projected/929a2b2c-f4a1-47b4-ab76-327d9b68b730-kube-api-access-64msz\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.912087 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-utilities\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:17 crc kubenswrapper[4926]: E1125 18:15:17.912366 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.412347517 +0000 UTC m=+148.797861122 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:17 crc kubenswrapper[4926]: I1125 18:15:17.997490 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dz9mp"] Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.000474 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.030575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-utilities\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.030643 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-catalog-content\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.030671 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64msz\" (UniqueName: \"kubernetes.io/projected/929a2b2c-f4a1-47b4-ab76-327d9b68b730-kube-api-access-64msz\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.030729 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.031014 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.531001331 +0000 UTC m=+148.916514936 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.031642 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-utilities\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.032164 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-catalog-content\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.040859 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dz9mp"] Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.104216 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64msz\" (UniqueName: \"kubernetes.io/projected/929a2b2c-f4a1-47b4-ab76-327d9b68b730-kube-api-access-64msz\") pod \"community-operators-skkd4\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") " pod="openshift-marketplace/community-operators-skkd4" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.110954 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:18 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:18 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:18 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.111083 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.140462 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.140874 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-utilities\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.141005 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-catalog-content\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.141043 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6849\" (UniqueName: \"kubernetes.io/projected/d060967d-136d-4023-8178-ad4b5270cd12-kube-api-access-r6849\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.141167 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.641149649 +0000 UTC m=+149.026663254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.176427 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6frbc"] Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.178222 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6frbc" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.219637 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6frbc"] Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.246955 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-utilities\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.247504 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-utilities\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.247563 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-catalog-content\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.247596 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6849\" (UniqueName: \"kubernetes.io/projected/d060967d-136d-4023-8178-ad4b5270cd12-kube-api-access-r6849\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.247644 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.247671 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-catalog-content\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.247721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvlss\" (UniqueName: \"kubernetes.io/projected/130f3c89-e3ab-4b56-b499-7fa327ab5822-kube-api-access-pvlss\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc" Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.248283 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-utilities\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp" Nov 25 18:15:18 crc 
kubenswrapper[4926]: E1125 18:15:18.248771 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.748748484 +0000 UTC m=+149.134262339 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.248804 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-catalog-content\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.287065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6849\" (UniqueName: \"kubernetes.io/projected/d060967d-136d-4023-8178-ad4b5270cd12-kube-api-access-r6849\") pod \"certified-operators-dz9mp\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") " pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.351478 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.351688 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.851658601 +0000 UTC m=+149.237172206 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.351855 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-utilities\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.351956 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.352002 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-catalog-content\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.352048 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvlss\" (UniqueName: \"kubernetes.io/projected/130f3c89-e3ab-4b56-b499-7fa327ab5822-kube-api-access-pvlss\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.352277 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.85226901 +0000 UTC m=+149.237782615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.353687 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-catalog-content\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.354166 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-utilities\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.359749 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.395349 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvlss\" (UniqueName: \"kubernetes.io/projected/130f3c89-e3ab-4b56-b499-7fa327ab5822-kube-api-access-pvlss\") pod \"community-operators-6frbc\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") " pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.396969 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.454497 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.454759 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:18.954744774 +0000 UTC m=+149.340258379 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.500952 4926 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.524152 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.560100 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.560481 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:19.060465984 +0000 UTC m=+149.445979589 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.599134 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mnpbw"]
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.601842 4926 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T18:15:18.501346558Z","Handler":null,"Name":""}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.664196 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.666102 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 18:15:19.166076681 +0000 UTC m=+149.551590286 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.666248 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:18 crc kubenswrapper[4926]: E1125 18:15:18.666538 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 18:15:19.166531314 +0000 UTC m=+149.552044919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qkn8k" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.701642 4926 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.702105 4926 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.767994 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.816950 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.819281 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" event={"ID":"ca675b57-be0b-4dd2-9d94-7f25262c885d","Type":"ContainerStarted","Data":"a94b772788ac83a2db78c740603ebb26e7d96032fd74d1a9801710ad8df1ee40"}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.849649 4926 generic.go:334] "Generic (PLEG): container finished" podID="b6560e66-aef6-4fd2-b808-4bdfaad6b992" containerID="2361a082ccf88b5a14c2b5d5ab11d7acf571ffa5267e3ae4f5325856560693ab" exitCode=0
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.849782 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" event={"ID":"b6560e66-aef6-4fd2-b808-4bdfaad6b992","Type":"ContainerDied","Data":"2361a082ccf88b5a14c2b5d5ab11d7acf571ffa5267e3ae4f5325856560693ab"}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.869633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.882056 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"599a367add31951f9c3aba81498b52522005b52ddacd139e7493fd3697a77260"}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.882122 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"3261926d82457586473f20bfa7c7897ff012630483a41324202437c486c345e1"}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.888512 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mnpbw" event={"ID":"fb1af6de-8249-426c-a3ab-2ba9e009c1d7","Type":"ContainerStarted","Data":"86bfbf2e2503a897efeabf46c6b9f87b344ec27425307a878bfc3f58c57a31e0"}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.894526 4926 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.894559 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.930749 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"04b0a4be5f6317ddc1d8690b5170dcf88ab3e4dacc264499ac7080603a4591bc"}
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.938158 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:15:18 crc kubenswrapper[4926]: I1125 18:15:18.990521 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qkn8k\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.010926 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-skkd4"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.059952 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dz9mp"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.077038 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.115629 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 18:15:19 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]process-running ok
Nov 25 18:15:19 crc kubenswrapper[4926]: healthz check failed
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.115681 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.284484 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6frbc"]
Nov 25 18:15:19 crc kubenswrapper[4926]: W1125 18:15:19.328859 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod130f3c89_e3ab_4b56_b499_7fa327ab5822.slice/crio-6e854c75245fd33e05f114d003b903376d968b5f627d2a5e33a90530e822d13f WatchSource:0}: Error finding container 6e854c75245fd33e05f114d003b903376d968b5f627d2a5e33a90530e822d13f: Status 404 returned error can't find the container with id 6e854c75245fd33e05f114d003b903376d968b5f627d2a5e33a90530e822d13f
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.364832 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.365804 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.367298 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.369358 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.369679 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.383811 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.383942 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.454715 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qkn8k"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.484850 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.484942 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.486606 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.510716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.640551 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.767153 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w7nrb"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.768233 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.774756 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.788611 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7nrb"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.795152 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kwc2\" (UniqueName: \"kubernetes.io/projected/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-kube-api-access-7kwc2\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.795231 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-utilities\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.795271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-catalog-content\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.897643 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-catalog-content\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.897712 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kwc2\" (UniqueName: \"kubernetes.io/projected/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-kube-api-access-7kwc2\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.897754 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-utilities\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.898233 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-utilities\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.898493 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-catalog-content\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.903599 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.903788 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.911899 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]log ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]etcd ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 25 18:15:19 crc kubenswrapper[4926]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 18:15:19 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 18:15:19 crc kubenswrapper[4926]: livez check failed
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.911988 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.915402 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.920776 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.922913 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kwc2\" (UniqueName: \"kubernetes.io/projected/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-kube-api-access-7kwc2\") pod \"redhat-marketplace-w7nrb\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") " pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.948348 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"33794245055a85e1ea8c2a7e90daf9bbfa8d63d6f02d872b0e64016a997123db"}
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.948492 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"583d6192f7b9bd27deccac7ef8dc2e6bbfaacf1edc6e344e0f83924cd52ef476"}
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.950631 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.959717 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"cc823b6170029cf5eaad42d460e7017a32a275ab3ac29765eed19e5eab5c3b81"}
Nov 25 18:15:19 crc kubenswrapper[4926]: I1125 18:15:19.995447 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" event={"ID":"ca675b57-be0b-4dd2-9d94-7f25262c885d","Type":"ContainerStarted","Data":"d353c4da23897a4c63ad0bbc1907614f792694cba7a9d03a8ebe00c682fb4324"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.002702 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"cf4a767b-f81c-496c-a4f6-e90932cb4cc1","Type":"ContainerStarted","Data":"6239f12b2537c97f842b6d2b671aea8621f288ee21d2b1dc58ac5a9dd0329a9a"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.004595 4926 generic.go:334] "Generic (PLEG): container finished" podID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerID="7ca0e9f1dc8bbea4c046cbecbc169796a9018a1b0592f047e6fdf62c8a30a7ee" exitCode=0
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.004643 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skkd4" event={"ID":"929a2b2c-f4a1-47b4-ab76-327d9b68b730","Type":"ContainerDied","Data":"7ca0e9f1dc8bbea4c046cbecbc169796a9018a1b0592f047e6fdf62c8a30a7ee"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.004659 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skkd4" event={"ID":"929a2b2c-f4a1-47b4-ab76-327d9b68b730","Type":"ContainerStarted","Data":"1d774139cebea438e009e9a0d8eb415481f77376332565f60e059917a304e259"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.006323 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.010142 4926 generic.go:334] "Generic (PLEG): container finished" podID="d060967d-136d-4023-8178-ad4b5270cd12" containerID="8806999c5fab47111e3c1d034b12e6a8bc509edb084280e5d24405cf40153886" exitCode=0
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.010234 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dz9mp" event={"ID":"d060967d-136d-4023-8178-ad4b5270cd12","Type":"ContainerDied","Data":"8806999c5fab47111e3c1d034b12e6a8bc509edb084280e5d24405cf40153886"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.010268 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dz9mp" event={"ID":"d060967d-136d-4023-8178-ad4b5270cd12","Type":"ContainerStarted","Data":"53e6dff7739df21bb36e2b9b79e6874894fa993e8abedc4a992983db0c1907a0"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.017169 4926 generic.go:334] "Generic (PLEG): container finished" podID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerID="e66b87e267ebc08866a734ad21b1e9d4d61e96140746f2e06ffa8ad12069899a" exitCode=0
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.017273 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6frbc" event={"ID":"130f3c89-e3ab-4b56-b499-7fa327ab5822","Type":"ContainerDied","Data":"e66b87e267ebc08866a734ad21b1e9d4d61e96140746f2e06ffa8ad12069899a"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.017298 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6frbc" event={"ID":"130f3c89-e3ab-4b56-b499-7fa327ab5822","Type":"ContainerStarted","Data":"6e854c75245fd33e05f114d003b903376d968b5f627d2a5e33a90530e822d13f"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.023948 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-nqxtc" podStartSLOduration=11.023930255 podStartE2EDuration="11.023930255s" podCreationTimestamp="2025-11-25 18:15:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:20.021227426 +0000 UTC m=+150.406741031" watchObservedRunningTime="2025-11-25 18:15:20.023930255 +0000 UTC m=+150.409443860"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.029644 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" event={"ID":"881e35fe-f917-461a-a1d6-804e58b5b740","Type":"ContainerStarted","Data":"36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.029703 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" event={"ID":"881e35fe-f917-461a-a1d6-804e58b5b740","Type":"ContainerStarted","Data":"e131c0a16cd35e21b4e8a4df33f3b8ab03ed857b4ae6e7c48c0701aacb5e6be4"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.032605 4926 generic.go:334] "Generic (PLEG): container finished" podID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerID="143589e273001538e55794f4cd8d17788398a7c3336ce0b6ca9afb4e6caae2bd" exitCode=0
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.032736 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mnpbw" event={"ID":"fb1af6de-8249-426c-a3ab-2ba9e009c1d7","Type":"ContainerDied","Data":"143589e273001538e55794f4cd8d17788398a7c3336ce0b6ca9afb4e6caae2bd"}
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.091498 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.126970 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 18:15:20 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 25 18:15:20 crc kubenswrapper[4926]: [+]process-running ok
Nov 25 18:15:20 crc kubenswrapper[4926]: healthz check failed
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.127102 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.162808 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bqtsc"]
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.163834 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.183208 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bqtsc"]
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.211724 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-utilities\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.211814 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-catalog-content\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.211846 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xw8v\" (UniqueName: \"kubernetes.io/projected/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-kube-api-access-8xw8v\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.313632 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-utilities\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.313698 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-catalog-content\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.313737 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xw8v\" (UniqueName: \"kubernetes.io/projected/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-kube-api-access-8xw8v\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.314611 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-utilities\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.315023 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-catalog-content\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.355394 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xw8v\" (UniqueName: \"kubernetes.io/projected/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-kube-api-access-8xw8v\") pod \"redhat-marketplace-bqtsc\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.366192 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.400954 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.436895 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7nrb"]
Nov 25 18:15:20 crc kubenswrapper[4926]: W1125 18:15:20.450051 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6ea68a1_b0ef_4035_9466_643ca03fc8a6.slice/crio-f9c3bd4728639144a6e1fe924af0370c48d4921a22ed5eb5cae017095074b102 WatchSource:0}: Error finding container f9c3bd4728639144a6e1fe924af0370c48d4921a22ed5eb5cae017095074b102: Status 404 returned error can't find the container with id f9c3bd4728639144a6e1fe924af0370c48d4921a22ed5eb5cae017095074b102
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.482315 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.519758 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4gmq\" (UniqueName: \"kubernetes.io/projected/b6560e66-aef6-4fd2-b808-4bdfaad6b992-kube-api-access-h4gmq\") pod \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") "
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.519880 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6560e66-aef6-4fd2-b808-4bdfaad6b992-secret-volume\") pod \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") "
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.519987 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume\") pod \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\" (UID: \"b6560e66-aef6-4fd2-b808-4bdfaad6b992\") "
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.521577 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume" (OuterVolumeSpecName: "config-volume") pod "b6560e66-aef6-4fd2-b808-4bdfaad6b992" (UID: "b6560e66-aef6-4fd2-b808-4bdfaad6b992"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.525010 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6560e66-aef6-4fd2-b808-4bdfaad6b992-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b6560e66-aef6-4fd2-b808-4bdfaad6b992" (UID: "b6560e66-aef6-4fd2-b808-4bdfaad6b992"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.525235 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6560e66-aef6-4fd2-b808-4bdfaad6b992-kube-api-access-h4gmq" (OuterVolumeSpecName: "kube-api-access-h4gmq") pod "b6560e66-aef6-4fd2-b808-4bdfaad6b992" (UID: "b6560e66-aef6-4fd2-b808-4bdfaad6b992"). InnerVolumeSpecName "kube-api-access-h4gmq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.621814 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4gmq\" (UniqueName: \"kubernetes.io/projected/b6560e66-aef6-4fd2-b808-4bdfaad6b992-kube-api-access-h4gmq\") on node \"crc\" DevicePath \"\""
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.621849 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6560e66-aef6-4fd2-b808-4bdfaad6b992-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.621859 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6560e66-aef6-4fd2-b808-4bdfaad6b992-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.774228 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lcpfw"]
Nov 25 18:15:20 crc kubenswrapper[4926]: E1125 18:15:20.774877 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6560e66-aef6-4fd2-b808-4bdfaad6b992" containerName="collect-profiles"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.774889 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6560e66-aef6-4fd2-b808-4bdfaad6b992" containerName="collect-profiles"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.775035 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6560e66-aef6-4fd2-b808-4bdfaad6b992" containerName="collect-profiles"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.775798 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.780669 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.826982 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-catalog-content\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.827061 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-utilities\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.827080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87gpn\" (UniqueName: \"kubernetes.io/projected/331da98f-d117-4363-bc17-2bbd84a7f7d7-kube-api-access-87gpn\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.856991 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lcpfw"]
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.928830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-catalog-content\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.929006 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-utilities\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.929035 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87gpn\" (UniqueName: \"kubernetes.io/projected/331da98f-d117-4363-bc17-2bbd84a7f7d7-kube-api-access-87gpn\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.929730 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-catalog-content\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.929734 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-utilities\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.956888 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87gpn\" (UniqueName: \"kubernetes.io/projected/331da98f-d117-4363-bc17-2bbd84a7f7d7-kube-api-access-87gpn\") pod \"redhat-operators-lcpfw\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") " pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:20 crc kubenswrapper[4926]: I1125 18:15:20.985219 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bqtsc"]
Nov 25 18:15:20 crc kubenswrapper[4926]: W1125 18:15:20.993760 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf58da4a7_76aa_47fc_89d9_1da13a4fe67b.slice/crio-962e5f89d67c0c20822acf30af36c883bcaa80490df297086b931ec6fed4a4aa WatchSource:0}: Error finding container 962e5f89d67c0c20822acf30af36c883bcaa80490df297086b931ec6fed4a4aa: Status 404 returned error can't find the container with id 962e5f89d67c0c20822acf30af36c883bcaa80490df297086b931ec6fed4a4aa
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.045429 4926 generic.go:334] "Generic (PLEG): container finished" podID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerID="9896705a879e5839ec9a22855fe829e66b12986c5d41e0808604cbb0b5c2fbdc" exitCode=0
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.045556 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7nrb" event={"ID":"b6ea68a1-b0ef-4035-9466-643ca03fc8a6","Type":"ContainerDied","Data":"9896705a879e5839ec9a22855fe829e66b12986c5d41e0808604cbb0b5c2fbdc"}
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.045604 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7nrb" event={"ID":"b6ea68a1-b0ef-4035-9466-643ca03fc8a6","Type":"ContainerStarted","Data":"f9c3bd4728639144a6e1fe924af0370c48d4921a22ed5eb5cae017095074b102"}
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.049782 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.049779 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs" event={"ID":"b6560e66-aef6-4fd2-b808-4bdfaad6b992","Type":"ContainerDied","Data":"6a6ab94847dc7c653e0540ee93c87294a9e41093d690686f420ba66e5f7139f7"}
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.049879 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a6ab94847dc7c653e0540ee93c87294a9e41093d690686f420ba66e5f7139f7"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.053969 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"cf4a767b-f81c-496c-a4f6-e90932cb4cc1","Type":"ContainerStarted","Data":"effd293ef88a68a646d80137987e89f4e8eb0981ff4357d6278a3f05fa99a89f"}
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.056890 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bqtsc" event={"ID":"f58da4a7-76aa-47fc-89d9-1da13a4fe67b","Type":"ContainerStarted","Data":"962e5f89d67c0c20822acf30af36c883bcaa80490df297086b931ec6fed4a4aa"}
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.057300 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.111557 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" podStartSLOduration=131.111529854 podStartE2EDuration="2m11.111529854s" podCreationTimestamp="2025-11-25 18:13:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:21.102507411 +0000 UTC m=+151.488021026" watchObservedRunningTime="2025-11-25 18:15:21.111529854 +0000 UTC m=+151.497043459"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.114243 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.116593 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 18:15:21 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 25 18:15:21 crc kubenswrapper[4926]: [+]process-running ok
Nov 25 18:15:21 crc kubenswrapper[4926]: healthz check failed
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.116708 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.127852 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.127832816 podStartE2EDuration="2.127832816s" podCreationTimestamp="2025-11-25 18:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:21.124921012 +0000 UTC m=+151.510434617" watchObservedRunningTime="2025-11-25 18:15:21.127832816 +0000 UTC m=+151.513346421"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.166815 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2nnr2"]
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.168514 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.179889 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2nnr2"]
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.233499 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9kgz\" (UniqueName: \"kubernetes.io/projected/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-kube-api-access-k9kgz\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.233567 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-utilities\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.233595 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-catalog-content\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.334425 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9kgz\" (UniqueName: \"kubernetes.io/projected/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-kube-api-access-k9kgz\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.334482 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-utilities\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.334508 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-catalog-content\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.335407 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-utilities\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.338152 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-catalog-content\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: E1125 18:15:21.352890 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf58da4a7_76aa_47fc_89d9_1da13a4fe67b.slice/crio-956a0113267575c0c32e90f91855fcc5edd970c931657d1bd59ee8f77c76cacd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf58da4a7_76aa_47fc_89d9_1da13a4fe67b.slice/crio-conmon-956a0113267575c0c32e90f91855fcc5edd970c931657d1bd59ee8f77c76cacd.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.371287 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9kgz\" (UniqueName: \"kubernetes.io/projected/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-kube-api-access-k9kgz\") pod \"redhat-operators-2nnr2\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.492988 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2nnr2"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.517416 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.517499 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-597mc"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.522356 4926 patch_prober.go:28] interesting pod/console-f9d7485db-597mc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.522475 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-597mc" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.637180 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lcpfw"]
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.743314 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.743423 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.743442 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.743624 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 25 18:15:21 crc kubenswrapper[4926]: I1125 18:15:21.930846 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2nnr2"]
Nov 25 18:15:21 crc kubenswrapper[4926]: W1125 18:15:21.936744 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14fd9c00_7eb5_46d1_a42b_c3b79ff781a4.slice/crio-bae3fabae3814e73f350b03ed9b94dc98908dab978ea8f3bd9e1bc05eacc6d45 WatchSource:0}: Error finding container bae3fabae3814e73f350b03ed9b94dc98908dab978ea8f3bd9e1bc05eacc6d45: Status 404 returned error can't find the container with id bae3fabae3814e73f350b03ed9b94dc98908dab978ea8f3bd9e1bc05eacc6d45
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.098095 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerStarted","Data":"bae3fabae3814e73f350b03ed9b94dc98908dab978ea8f3bd9e1bc05eacc6d45"}
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.099316 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-p5t89"
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.103128 4926 generic.go:334] "Generic (PLEG): container finished" podID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerID="956a0113267575c0c32e90f91855fcc5edd970c931657d1bd59ee8f77c76cacd" exitCode=0
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.103255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bqtsc" event={"ID":"f58da4a7-76aa-47fc-89d9-1da13a4fe67b","Type":"ContainerDied","Data":"956a0113267575c0c32e90f91855fcc5edd970c931657d1bd59ee8f77c76cacd"}
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.103436 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 18:15:22 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 25 18:15:22 crc kubenswrapper[4926]: [+]process-running ok
Nov 25 18:15:22 crc kubenswrapper[4926]: healthz check failed
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.103489 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.112809 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf4a767b-f81c-496c-a4f6-e90932cb4cc1" containerID="effd293ef88a68a646d80137987e89f4e8eb0981ff4357d6278a3f05fa99a89f" exitCode=0
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.112904 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"cf4a767b-f81c-496c-a4f6-e90932cb4cc1","Type":"ContainerDied","Data":"effd293ef88a68a646d80137987e89f4e8eb0981ff4357d6278a3f05fa99a89f"}
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.120125 4926 generic.go:334] "Generic (PLEG): container finished" podID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerID="b80daf079cc3632d5a677a4cf5092c2bb5227dc106d31af0c4bf429705ffdc93" exitCode=0
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.123539 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerDied","Data":"b80daf079cc3632d5a677a4cf5092c2bb5227dc106d31af0c4bf429705ffdc93"}
Nov 25 18:15:22 crc kubenswrapper[4926]: I1125 18:15:22.123570 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerStarted","Data":"a9844d9b2a9bbc389173494ca88bc6fd793ff1c56a80cf55778d47f6e685699d"}
Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.103635 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500"
start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:23 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:23 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:23 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.104009 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.206258 4926 generic.go:334] "Generic (PLEG): container finished" podID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerID="a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf" exitCode=0 Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.206468 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerDied","Data":"a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf"} Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.550042 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.584218 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kube-api-access\") pod \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.584357 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kubelet-dir\") pod \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\" (UID: \"cf4a767b-f81c-496c-a4f6-e90932cb4cc1\") " Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.584678 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "cf4a767b-f81c-496c-a4f6-e90932cb4cc1" (UID: "cf4a767b-f81c-496c-a4f6-e90932cb4cc1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.594557 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "cf4a767b-f81c-496c-a4f6-e90932cb4cc1" (UID: "cf4a767b-f81c-496c-a4f6-e90932cb4cc1"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.685637 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.685718 4926 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf4a767b-f81c-496c-a4f6-e90932cb4cc1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.896108 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 18:15:23 crc kubenswrapper[4926]: E1125 18:15:23.896344 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf4a767b-f81c-496c-a4f6-e90932cb4cc1" containerName="pruner" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.896356 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf4a767b-f81c-496c-a4f6-e90932cb4cc1" containerName="pruner" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.896487 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf4a767b-f81c-496c-a4f6-e90932cb4cc1" containerName="pruner" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.897042 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.899437 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.899607 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.908505 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.990100 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:23 crc kubenswrapper[4926]: I1125 18:15:23.990475 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.091177 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.091247 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: 
\"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.091418 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.102939 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:24 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:24 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:24 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.103007 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.112680 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.219108 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.223471 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"cf4a767b-f81c-496c-a4f6-e90932cb4cc1","Type":"ContainerDied","Data":"6239f12b2537c97f842b6d2b671aea8621f288ee21d2b1dc58ac5a9dd0329a9a"} Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.223534 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6239f12b2537c97f842b6d2b671aea8621f288ee21d2b1dc58ac5a9dd0329a9a" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.223506 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.916450 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.918725 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:24 crc kubenswrapper[4926]: I1125 18:15:24.949595 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 18:15:25 crc kubenswrapper[4926]: I1125 18:15:25.103922 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:25 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:25 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:25 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:25 crc kubenswrapper[4926]: I1125 18:15:25.103997 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:25 crc kubenswrapper[4926]: I1125 18:15:25.265641 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"52fed8b5-813f-4a7f-9f6d-31d525a0aa16","Type":"ContainerStarted","Data":"893144aee96e12f525ef7235f4b9c9002f224472cc259e53faa9105780363298"} Nov 25 18:15:26 crc kubenswrapper[4926]: I1125 18:15:26.102079 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:26 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:26 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:26 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:26 crc kubenswrapper[4926]: I1125 18:15:26.102447 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:26 crc kubenswrapper[4926]: I1125 18:15:26.950354 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-cfm2d" Nov 25 18:15:27 crc kubenswrapper[4926]: I1125 18:15:27.102544 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:27 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:27 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:27 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:27 crc kubenswrapper[4926]: I1125 18:15:27.102692 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" 
containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:27 crc kubenswrapper[4926]: I1125 18:15:27.287179 4926 generic.go:334] "Generic (PLEG): container finished" podID="52fed8b5-813f-4a7f-9f6d-31d525a0aa16" containerID="7a88af891027681eda88ffd0d65c9d73b502668a23fd97f81e0d928c083dff9a" exitCode=0 Nov 25 18:15:27 crc kubenswrapper[4926]: I1125 18:15:27.287225 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"52fed8b5-813f-4a7f-9f6d-31d525a0aa16","Type":"ContainerDied","Data":"7a88af891027681eda88ffd0d65c9d73b502668a23fd97f81e0d928c083dff9a"} Nov 25 18:15:28 crc kubenswrapper[4926]: I1125 18:15:28.105926 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:28 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:28 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:28 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:28 crc kubenswrapper[4926]: I1125 18:15:28.106338 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:29 crc kubenswrapper[4926]: I1125 18:15:29.103525 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:29 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:29 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:29 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:29 crc kubenswrapper[4926]: I1125 18:15:29.103782 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:30 crc kubenswrapper[4926]: I1125 18:15:30.102238 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:30 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:30 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:30 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:30 crc kubenswrapper[4926]: I1125 18:15:30.102292 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.103518 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:31 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 
25 18:15:31 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:31 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.103947 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.517951 4926 patch_prober.go:28] interesting pod/console-f9d7485db-597mc container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.518185 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-597mc" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.743644 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.743711 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.743648 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-r4ssr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 25 18:15:31 crc kubenswrapper[4926]: I1125 18:15:31.744114 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r4ssr" podUID="00045d3a-a833-44a7-87db-45cf8cfb26d1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.23:8080/\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 25 18:15:32 crc kubenswrapper[4926]: I1125 18:15:32.101985 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:32 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:32 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:32 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:32 crc kubenswrapper[4926]: I1125 18:15:32.102049 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:32 crc kubenswrapper[4926]: I1125 18:15:32.138508 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:15:32 crc kubenswrapper[4926]: I1125 18:15:32.162994 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6aa4d7ff-fb65-4a4b-b745-8bb9151862f5-metrics-certs\") pod \"network-metrics-daemon-2mwzk\" (UID: \"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5\") " pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:15:32 crc kubenswrapper[4926]: I1125 18:15:32.350524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-2mwzk" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.102142 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:33 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 25 18:15:33 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:33 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.102238 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.541044 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.541112 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.782573 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.860537 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kubelet-dir\") pod \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.860638 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kube-api-access\") pod \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\" (UID: \"52fed8b5-813f-4a7f-9f6d-31d525a0aa16\") " Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.860672 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "52fed8b5-813f-4a7f-9f6d-31d525a0aa16" (UID: "52fed8b5-813f-4a7f-9f6d-31d525a0aa16"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.860929 4926 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.866834 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "52fed8b5-813f-4a7f-9f6d-31d525a0aa16" (UID: "52fed8b5-813f-4a7f-9f6d-31d525a0aa16"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:15:33 crc kubenswrapper[4926]: I1125 18:15:33.962688 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52fed8b5-813f-4a7f-9f6d-31d525a0aa16-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 18:15:34 crc kubenswrapper[4926]: I1125 18:15:34.104319 4926 patch_prober.go:28] interesting pod/router-default-5444994796-p5t89 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 18:15:34 crc kubenswrapper[4926]: [+]has-synced ok Nov 25 18:15:34 crc kubenswrapper[4926]: [+]process-running ok Nov 25 18:15:34 crc kubenswrapper[4926]: healthz check failed Nov 25 18:15:34 crc kubenswrapper[4926]: I1125 18:15:34.104407 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-p5t89" podUID="864adf3d-a017-4eac-944b-5aced3d2d765" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:15:34 crc kubenswrapper[4926]: I1125 18:15:34.338935 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 18:15:34 crc kubenswrapper[4926]: I1125 18:15:34.338981 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"52fed8b5-813f-4a7f-9f6d-31d525a0aa16","Type":"ContainerDied","Data":"893144aee96e12f525ef7235f4b9c9002f224472cc259e53faa9105780363298"} Nov 25 18:15:34 crc kubenswrapper[4926]: I1125 18:15:34.339477 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="893144aee96e12f525ef7235f4b9c9002f224472cc259e53faa9105780363298" Nov 25 18:15:35 crc kubenswrapper[4926]: I1125 18:15:35.107723 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:35 crc kubenswrapper[4926]: I1125 18:15:35.117312 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-p5t89" Nov 25 18:15:39 crc kubenswrapper[4926]: I1125 18:15:39.093414 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:15:41 crc kubenswrapper[4926]: I1125 18:15:41.524866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:41 crc kubenswrapper[4926]: I1125 18:15:41.531318 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:15:41 crc kubenswrapper[4926]: I1125 18:15:41.760762 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-r4ssr" Nov 25 18:15:46 crc kubenswrapper[4926]: E1125 18:15:46.936202 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage1547399827/1\": happened during read: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 18:15:46 crc kubenswrapper[4926]: E1125 18:15:46.937440 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-87gpn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-lcpfw_openshift-marketplace(331da98f-d117-4363-bc17-2bbd84a7f7d7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage1547399827/1\": happened during read: context canceled" logger="UnhandledError" Nov 25 18:15:46 crc kubenswrapper[4926]: E1125 18:15:46.938862 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage1547399827/1\\\": happened during read: context canceled\"" pod="openshift-marketplace/redhat-operators-lcpfw" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" Nov 25 18:15:47 crc kubenswrapper[4926]: E1125 18:15:47.417364 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 18:15:47 crc kubenswrapper[4926]: E1125 18:15:47.417515 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8xw8v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bqtsc_openshift-marketplace(f58da4a7-76aa-47fc-89d9-1da13a4fe67b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:47 crc kubenswrapper[4926]: E1125 18:15:47.418645 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-bqtsc" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" Nov 25 18:15:50 crc kubenswrapper[4926]: E1125 18:15:50.414814 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-lcpfw" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" Nov 25 18:15:50 crc kubenswrapper[4926]: E1125 18:15:50.414955 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-bqtsc" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" Nov 25 18:15:51 crc kubenswrapper[4926]: E1125 18:15:51.056182 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 18:15:51 crc kubenswrapper[4926]: E1125 18:15:51.056712 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pvlss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-6frbc_openshift-marketplace(130f3c89-e3ab-4b56-b499-7fa327ab5822): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:51 crc kubenswrapper[4926]: E1125 18:15:51.058025 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-6frbc" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" Nov 25 18:15:51 crc kubenswrapper[4926]: I1125 18:15:51.586566 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qpkmq" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.378976 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-6frbc" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.450498 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.450922 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7kwc2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-w7nrb_openshift-marketplace(b6ea68a1-b0ef-4035-9466-643ca03fc8a6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.452110 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-w7nrb" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.458298 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.458443 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r6849,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-dz9mp_openshift-marketplace(d060967d-136d-4023-8178-ad4b5270cd12): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:52 crc kubenswrapper[4926]: E1125 18:15:52.459619 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-dz9mp" podUID="d060967d-136d-4023-8178-ad4b5270cd12" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.113725 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-dz9mp" podUID="d060967d-136d-4023-8178-ad4b5270cd12" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.113741 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-w7nrb" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.191285 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.191827 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zcbt9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-mnpbw_openshift-marketplace(fb1af6de-8249-426c-a3ab-2ba9e009c1d7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.192848 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.192920 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-mnpbw" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.193024 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k9kgz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-2nnr2_openshift-marketplace(14fd9c00-7eb5-46d1-a42b-c3b79ff781a4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.194184 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-2nnr2" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.201620 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.201865 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-64msz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-skkd4_openshift-marketplace(929a2b2c-f4a1-47b4-ab76-327d9b68b730): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.203838 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-skkd4" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.470427 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-2nnr2" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.471203 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-mnpbw" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" Nov 25 18:15:55 crc kubenswrapper[4926]: E1125 18:15:55.471281 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-skkd4" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" Nov 25 18:15:55 crc kubenswrapper[4926]: I1125 18:15:55.532919 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-2mwzk"] Nov 25 18:15:55 crc kubenswrapper[4926]: W1125 18:15:55.551571 4926 manager.go:1169] Failed to process watch event {EventType:0 
Nov 25 18:15:55 crc kubenswrapper[4926]: I1125 18:15:55.532919 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-2mwzk"]
Nov 25 18:15:55 crc kubenswrapper[4926]: W1125 18:15:55.551571 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6aa4d7ff_fb65_4a4b_b745_8bb9151862f5.slice/crio-daaa8ab5d1b664f4b8b6db80fc9e14759db8760dbef58cebd028251533595e54 WatchSource:0}: Error finding container daaa8ab5d1b664f4b8b6db80fc9e14759db8760dbef58cebd028251533595e54: Status 404 returned error can't find the container with id daaa8ab5d1b664f4b8b6db80fc9e14759db8760dbef58cebd028251533595e54
Nov 25 18:15:56 crc kubenswrapper[4926]: I1125 18:15:56.472903 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" event={"ID":"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5","Type":"ContainerStarted","Data":"b9085122d4879f579ee0dde540c5233b7ab6951fba1bb3fffdeff1c031fe7eef"}
Nov 25 18:15:56 crc kubenswrapper[4926]: I1125 18:15:56.473538 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" event={"ID":"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5","Type":"ContainerStarted","Data":"e2df1966e1edb7473bf8ba095545fee79379eba89cb951ce22518b53cf6772de"}
Nov 25 18:15:56 crc kubenswrapper[4926]: I1125 18:15:56.473552 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-2mwzk" event={"ID":"6aa4d7ff-fb65-4a4b-b745-8bb9151862f5","Type":"ContainerStarted","Data":"daaa8ab5d1b664f4b8b6db80fc9e14759db8760dbef58cebd028251533595e54"}
Nov 25 18:15:56 crc kubenswrapper[4926]: I1125 18:15:56.493432 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-2mwzk" podStartSLOduration=167.49335619 podStartE2EDuration="2m47.49335619s" podCreationTimestamp="2025-11-25 18:13:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:15:56.489574749 +0000 UTC m=+186.875088354" watchObservedRunningTime="2025-11-25 18:15:56.49335619 +0000 UTC m=+186.878869795"
Nov 25 18:15:57 crc kubenswrapper[4926]: I1125 18:15:57.653031 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 18:15:58 crc kubenswrapper[4926]: I1125 18:15:58.167472 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-w7m5b"]
Nov 25 18:16:03 crc kubenswrapper[4926]: I1125 18:16:03.540973 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:16:03 crc kubenswrapper[4926]: I1125 18:16:03.541360 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:16:05 crc kubenswrapper[4926]: I1125 18:16:05.526692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerStarted","Data":"1475ebca088901738d407ff04318b352102da56e7222aa58e7605afb59aa635d"}
Nov 25 18:16:06 crc kubenswrapper[4926]: I1125 18:16:06.535800 4926 generic.go:334] "Generic (PLEG): container finished" podID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerID="1475ebca088901738d407ff04318b352102da56e7222aa58e7605afb59aa635d" exitCode=0
Nov 25 18:16:06 crc kubenswrapper[4926]: I1125 18:16:06.535873 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerDied","Data":"1475ebca088901738d407ff04318b352102da56e7222aa58e7605afb59aa635d"}
Nov 25 18:16:06 crc kubenswrapper[4926]: I1125 18:16:06.542102 4926 generic.go:334] "Generic (PLEG): container finished" podID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerID="3bd9e7d1977023c1707ff1e02a68f1b4a79bbe4b32c2ffeea9d5981a1e97e9cb" exitCode=0
Nov 25 18:16:06 crc kubenswrapper[4926]: I1125 18:16:06.542137 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bqtsc" event={"ID":"f58da4a7-76aa-47fc-89d9-1da13a4fe67b","Type":"ContainerDied","Data":"3bd9e7d1977023c1707ff1e02a68f1b4a79bbe4b32c2ffeea9d5981a1e97e9cb"}
Nov 25 18:16:07 crc kubenswrapper[4926]: I1125 18:16:07.550689 4926 generic.go:334] "Generic (PLEG): container finished" podID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerID="b685188d113bd917c7ca28f4c542a1c9bee2890dcf81b99f841d192c0efb8828" exitCode=0
Nov 25 18:16:07 crc kubenswrapper[4926]: I1125 18:16:07.550792 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6frbc" event={"ID":"130f3c89-e3ab-4b56-b499-7fa327ab5822","Type":"ContainerDied","Data":"b685188d113bd917c7ca28f4c542a1c9bee2890dcf81b99f841d192c0efb8828"}
Nov 25 18:16:07 crc kubenswrapper[4926]: I1125 18:16:07.553818 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bqtsc" event={"ID":"f58da4a7-76aa-47fc-89d9-1da13a4fe67b","Type":"ContainerStarted","Data":"e9ce17ee17f45561d3c67a623b24e8e5e210c30c3d4829c719236ead4158fef6"}
Nov 25 18:16:07 crc kubenswrapper[4926]: I1125 18:16:07.556883 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerStarted","Data":"41fed66acb251c07aa6e4e1ec0024994c25fe77fbef15ae1b0345518191cfc8f"}
Nov 25 18:16:07 crc kubenswrapper[4926]: I1125 18:16:07.591824 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lcpfw" podStartSLOduration=2.572179995 podStartE2EDuration="47.591808048s" podCreationTimestamp="2025-11-25 18:15:20 +0000 UTC" firstStartedPulling="2025-11-25 18:15:22.127784279 +0000 UTC m=+152.513297884" lastFinishedPulling="2025-11-25 18:16:07.147412332 +0000 UTC m=+197.532925937" observedRunningTime="2025-11-25 18:16:07.589585879 +0000 UTC m=+197.975099484" watchObservedRunningTime="2025-11-25 18:16:07.591808048 +0000 UTC m=+197.977321673"
Nov 25 18:16:07 crc kubenswrapper[4926]: I1125 18:16:07.607463 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bqtsc" podStartSLOduration=2.573143617 podStartE2EDuration="47.607443109s" podCreationTimestamp="2025-11-25 18:15:20 +0000 UTC" firstStartedPulling="2025-11-25 18:15:22.106969935 +0000 UTC m=+152.492483540" lastFinishedPulling="2025-11-25 18:16:07.141269417 +0000 UTC m=+197.526783032" observedRunningTime="2025-11-25 18:16:07.60561123 +0000 UTC m=+197.991124835" watchObservedRunningTime="2025-11-25 18:16:07.607443109 +0000 UTC m=+197.992956724"
Nov 25 18:16:08 crc kubenswrapper[4926]: I1125 18:16:08.563719 4926 generic.go:334] "Generic (PLEG): container finished" podID="d060967d-136d-4023-8178-ad4b5270cd12" containerID="c21306f06b9d7afe1859d80c8c35d043520f8aceea654ff91a682eb4cdb934a9" exitCode=0
Nov 25 18:16:08 crc kubenswrapper[4926]: I1125 18:16:08.563791 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dz9mp" event={"ID":"d060967d-136d-4023-8178-ad4b5270cd12","Type":"ContainerDied","Data":"c21306f06b9d7afe1859d80c8c35d043520f8aceea654ff91a682eb4cdb934a9"}
Nov 25 18:16:08 crc kubenswrapper[4926]: I1125 18:16:08.567618 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6frbc" event={"ID":"130f3c89-e3ab-4b56-b499-7fa327ab5822","Type":"ContainerStarted","Data":"e1de06e5faa32e2ca28de67b3595444bf54f71b72244e47ac349946b8a6fa4e0"}
Nov 25 18:16:08 crc kubenswrapper[4926]: I1125 18:16:08.593805 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6frbc" podStartSLOduration=2.622520202 podStartE2EDuration="50.593788347s" podCreationTimestamp="2025-11-25 18:15:18 +0000 UTC" firstStartedPulling="2025-11-25 18:15:20.029943769 +0000 UTC m=+150.415457384" lastFinishedPulling="2025-11-25 18:16:08.001211924 +0000 UTC m=+198.386725529" observedRunningTime="2025-11-25 18:16:08.590766766 +0000 UTC m=+198.976280381" watchObservedRunningTime="2025-11-25 18:16:08.593788347 +0000 UTC m=+198.979301952"
Nov 25 18:16:09 crc kubenswrapper[4926]: I1125 18:16:09.575633 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dz9mp" event={"ID":"d060967d-136d-4023-8178-ad4b5270cd12","Type":"ContainerStarted","Data":"eea0bcc1bd98ce8c809f081f0a13347e4805ba0f6a49be284153d3584caaef4c"}
Nov 25 18:16:09 crc kubenswrapper[4926]: I1125 18:16:09.577534 4926 generic.go:334] "Generic (PLEG): container finished" podID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerID="0426ca90be1fe025e5171f6601e3ad00b1ba989cbe71bf665198b80b5bf9540f" exitCode=0
Nov 25 18:16:09 crc kubenswrapper[4926]: I1125 18:16:09.577597 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mnpbw" event={"ID":"fb1af6de-8249-426c-a3ab-2ba9e009c1d7","Type":"ContainerDied","Data":"0426ca90be1fe025e5171f6601e3ad00b1ba989cbe71bf665198b80b5bf9540f"}
Nov 25 18:16:09 crc kubenswrapper[4926]: I1125 18:16:09.581620 4926 generic.go:334] "Generic (PLEG): container finished" podID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerID="77390ca48c916fd0d5568943ecfc645710126d7ebeee5286305636a34be927d7" exitCode=0
Nov 25 18:16:09 crc kubenswrapper[4926]: I1125 18:16:09.581668 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skkd4" event={"ID":"929a2b2c-f4a1-47b4-ab76-327d9b68b730","Type":"ContainerDied","Data":"77390ca48c916fd0d5568943ecfc645710126d7ebeee5286305636a34be927d7"}
Nov 25 18:16:09 crc kubenswrapper[4926]: I1125 18:16:09.601234 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dz9mp" podStartSLOduration=3.601588076 podStartE2EDuration="52.601214943s" podCreationTimestamp="2025-11-25 18:15:17 +0000 UTC" firstStartedPulling="2025-11-25 18:15:20.013899464 +0000 UTC m=+150.399413069" lastFinishedPulling="2025-11-25 18:16:09.013526321 +0000 UTC m=+199.399039936" observedRunningTime="2025-11-25 18:16:09.598976392 +0000 UTC m=+199.984490017" watchObservedRunningTime="2025-11-25 18:16:09.601214943 +0000 UTC m=+199.986728548"
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.483419 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.483469 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.588040 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skkd4" event={"ID":"929a2b2c-f4a1-47b4-ab76-327d9b68b730","Type":"ContainerStarted","Data":"83f65c7e7a728dbebd9cf6b964d15a81b78a098ec19ef2eab51427dc13f31038"}
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.590395 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mnpbw" event={"ID":"fb1af6de-8249-426c-a3ab-2ba9e009c1d7","Type":"ContainerStarted","Data":"e62d11e788a849850d5bbca989e1962e61d4a3dd1cffa52c13f5b52262f2ad1f"}
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.621320 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-skkd4" podStartSLOduration=3.65961882 podStartE2EDuration="53.621302518s" podCreationTimestamp="2025-11-25 18:15:17 +0000 UTC" firstStartedPulling="2025-11-25 18:15:20.006085337 +0000 UTC m=+150.391598942" lastFinishedPulling="2025-11-25 18:16:09.967769035 +0000 UTC m=+200.353282640" observedRunningTime="2025-11-25 18:16:10.619715585 +0000 UTC m=+201.005229200" watchObservedRunningTime="2025-11-25 18:16:10.621302518 +0000 UTC m=+201.006816123"
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.638817 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mnpbw" podStartSLOduration=3.728557606 podStartE2EDuration="53.638797069s" podCreationTimestamp="2025-11-25 18:15:17 +0000 UTC" firstStartedPulling="2025-11-25 18:15:20.039049794 +0000 UTC m=+150.424563389" lastFinishedPulling="2025-11-25 18:16:09.949289247 +0000 UTC m=+200.334802852" observedRunningTime="2025-11-25 18:16:10.635452679 +0000 UTC m=+201.020966294" watchObservedRunningTime="2025-11-25 18:16:10.638797069 +0000 UTC m=+201.024310674"
Nov 25 18:16:10 crc kubenswrapper[4926]: I1125 18:16:10.715908 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:16:11 crc kubenswrapper[4926]: I1125 18:16:11.115309 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:16:11 crc kubenswrapper[4926]: I1125 18:16:11.115687 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:16:11 crc kubenswrapper[4926]: I1125 18:16:11.608187 4926 generic.go:334] "Generic (PLEG): container finished" podID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerID="53a8504f4b080f00a2a3ed89f8045ed1fcbf5bed84900e5a5efbdbb5c1fddbf0" exitCode=0
Nov 25 18:16:11 crc kubenswrapper[4926]: I1125 18:16:11.608281 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7nrb" event={"ID":"b6ea68a1-b0ef-4035-9466-643ca03fc8a6","Type":"ContainerDied","Data":"53a8504f4b080f00a2a3ed89f8045ed1fcbf5bed84900e5a5efbdbb5c1fddbf0"}
Nov 25 18:16:11 crc kubenswrapper[4926]: I1125 18:16:11.611698 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerStarted","Data":"24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63"}
Nov 25 18:16:12 crc kubenswrapper[4926]: I1125 18:16:12.151115 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lcpfw" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="registry-server" probeResult="failure" output=<
Nov 25 18:16:12 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 25 18:16:12 crc kubenswrapper[4926]: >
Nov 25 18:16:12 crc kubenswrapper[4926]: I1125 18:16:12.618721 4926 generic.go:334] "Generic (PLEG): container finished" podID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerID="24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63" exitCode=0
Nov 25 18:16:12 crc kubenswrapper[4926]: I1125 18:16:12.618769 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerDied","Data":"24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63"}
Nov 25 18:16:16 crc kubenswrapper[4926]: I1125 18:16:16.639942 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7nrb" event={"ID":"b6ea68a1-b0ef-4035-9466-643ca03fc8a6","Type":"ContainerStarted","Data":"2eae1527b148cfc81f6aafbaa0b0fdacc26c03d1358dcab154f5f23394a2f46e"}
Nov 25 18:16:17 crc kubenswrapper[4926]: I1125 18:16:17.661284 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-w7nrb" podStartSLOduration=4.222452509 podStartE2EDuration="58.661269612s" podCreationTimestamp="2025-11-25 18:15:19 +0000 UTC" firstStartedPulling="2025-11-25 18:15:21.050831131 +0000 UTC m=+151.436344736" lastFinishedPulling="2025-11-25 18:16:15.489648234 +0000 UTC m=+205.875161839" observedRunningTime="2025-11-25 18:16:17.659417222 +0000 UTC m=+208.044930827" watchObservedRunningTime="2025-11-25 18:16:17.661269612 +0000 UTC m=+208.046783217"
Nov 25 18:16:17 crc kubenswrapper[4926]: I1125 18:16:17.910856 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mnpbw"
Nov 25 18:16:17 crc kubenswrapper[4926]: I1125 18:16:17.910927 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mnpbw"
Nov 25 18:16:17 crc kubenswrapper[4926]: I1125 18:16:17.948739 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mnpbw"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.360754 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.360817 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.396552 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.397588 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.397856 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.435163 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.525904 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.525992 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.594282 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.651477 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerStarted","Data":"87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94"}
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.666545 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2nnr2" podStartSLOduration=3.345252068 podStartE2EDuration="57.66652639s" podCreationTimestamp="2025-11-25 18:15:21 +0000 UTC" firstStartedPulling="2025-11-25 18:15:23.210538567 +0000 UTC m=+153.596052182" lastFinishedPulling="2025-11-25 18:16:17.531812899 +0000 UTC m=+207.917326504" observedRunningTime="2025-11-25 18:16:18.665094331 +0000 UTC m=+209.050607936" watchObservedRunningTime="2025-11-25 18:16:18.66652639 +0000 UTC m=+209.052039995"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.695985 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mnpbw"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.697579 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.698798 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:16:18 crc kubenswrapper[4926]: I1125 18:16:18.701669 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.092236 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.092619 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.136914 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.519245 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bqtsc"
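[Editor's sketch] The registry-server containers serve the catalog over gRPC on :50051, and the startup probes above fail with "timeout: failed to connect service \":50051\" within 1s" until the catalog is loaded, then flip to started/ready. The probe output matches a grpc_health_probe-style check; a minimal Go client of the standard gRPC health-checking service with the same 1s budget (the import paths are from grpc-go, not from this log, and localhost:50051 is an assumed target):

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Same 1s budget as the failing probes logged above.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		fmt.Fprintf(os.Stderr, "timeout: failed to connect service %q within 1s\n", ":50051")
		os.Exit(1)
	}
	defer conn.Close()

	// An empty service name asks for the server's overall health.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil || resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
		os.Exit(1)
	}
	fmt.Println("SERVING")
}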
pods=["openshift-marketplace/certified-operators-dz9mp"] Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.676932 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dz9mp" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="registry-server" containerID="cri-o://eea0bcc1bd98ce8c809f081f0a13347e4805ba0f6a49be284153d3584caaef4c" gracePeriod=2 Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.762799 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6frbc"] Nov 25 18:16:20 crc kubenswrapper[4926]: I1125 18:16:20.763002 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6frbc" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="registry-server" containerID="cri-o://e1de06e5faa32e2ca28de67b3595444bf54f71b72244e47ac349946b8a6fa4e0" gracePeriod=2 Nov 25 18:16:21 crc kubenswrapper[4926]: I1125 18:16:21.151072 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lcpfw" Nov 25 18:16:21 crc kubenswrapper[4926]: I1125 18:16:21.192154 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lcpfw" Nov 25 18:16:21 crc kubenswrapper[4926]: I1125 18:16:21.493315 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2nnr2" Nov 25 18:16:21 crc kubenswrapper[4926]: I1125 18:16:21.493404 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2nnr2" Nov 25 18:16:22 crc kubenswrapper[4926]: I1125 18:16:22.531297 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2nnr2" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="registry-server" probeResult="failure" output=< Nov 25 18:16:22 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:16:22 crc kubenswrapper[4926]: > Nov 25 18:16:22 crc kubenswrapper[4926]: I1125 18:16:22.687433 4926 generic.go:334] "Generic (PLEG): container finished" podID="d060967d-136d-4023-8178-ad4b5270cd12" containerID="eea0bcc1bd98ce8c809f081f0a13347e4805ba0f6a49be284153d3584caaef4c" exitCode=0 Nov 25 18:16:22 crc kubenswrapper[4926]: I1125 18:16:22.687507 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dz9mp" event={"ID":"d060967d-136d-4023-8178-ad4b5270cd12","Type":"ContainerDied","Data":"eea0bcc1bd98ce8c809f081f0a13347e4805ba0f6a49be284153d3584caaef4c"} Nov 25 18:16:22 crc kubenswrapper[4926]: I1125 18:16:22.689544 4926 generic.go:334] "Generic (PLEG): container finished" podID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerID="e1de06e5faa32e2ca28de67b3595444bf54f71b72244e47ac349946b8a6fa4e0" exitCode=0 Nov 25 18:16:22 crc kubenswrapper[4926]: I1125 18:16:22.689577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6frbc" event={"ID":"130f3c89-e3ab-4b56-b499-7fa327ab5822","Type":"ContainerDied","Data":"e1de06e5faa32e2ca28de67b3595444bf54f71b72244e47ac349946b8a6fa4e0"} Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.135227 4926 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.135227 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.163131 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bqtsc"]
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.163346 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bqtsc" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="registry-server" containerID="cri-o://e9ce17ee17f45561d3c67a623b24e8e5e210c30c3d4829c719236ead4158fef6" gracePeriod=2
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.193838 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" podUID="3e21465b-c285-4623-9566-f4998c280e16" containerName="oauth-openshift" containerID="cri-o://2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9" gracePeriod=15
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.257465 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-utilities\") pod \"130f3c89-e3ab-4b56-b499-7fa327ab5822\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") "
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.257748 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-catalog-content\") pod \"130f3c89-e3ab-4b56-b499-7fa327ab5822\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") "
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.257778 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvlss\" (UniqueName: \"kubernetes.io/projected/130f3c89-e3ab-4b56-b499-7fa327ab5822-kube-api-access-pvlss\") pod \"130f3c89-e3ab-4b56-b499-7fa327ab5822\" (UID: \"130f3c89-e3ab-4b56-b499-7fa327ab5822\") "
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.258583 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-utilities" (OuterVolumeSpecName: "utilities") pod "130f3c89-e3ab-4b56-b499-7fa327ab5822" (UID: "130f3c89-e3ab-4b56-b499-7fa327ab5822"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.263437 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/130f3c89-e3ab-4b56-b499-7fa327ab5822-kube-api-access-pvlss" (OuterVolumeSpecName: "kube-api-access-pvlss") pod "130f3c89-e3ab-4b56-b499-7fa327ab5822" (UID: "130f3c89-e3ab-4b56-b499-7fa327ab5822"). InnerVolumeSpecName "kube-api-access-pvlss". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.309054 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "130f3c89-e3ab-4b56-b499-7fa327ab5822" (UID: "130f3c89-e3ab-4b56-b499-7fa327ab5822"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.363442 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.363474 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/130f3c89-e3ab-4b56-b499-7fa327ab5822-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.363487 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvlss\" (UniqueName: \"kubernetes.io/projected/130f3c89-e3ab-4b56-b499-7fa327ab5822-kube-api-access-pvlss\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.469333 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.565926 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-catalog-content\") pod \"d060967d-136d-4023-8178-ad4b5270cd12\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") "
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.566019 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6849\" (UniqueName: \"kubernetes.io/projected/d060967d-136d-4023-8178-ad4b5270cd12-kube-api-access-r6849\") pod \"d060967d-136d-4023-8178-ad4b5270cd12\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") "
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.566055 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-utilities\") pod \"d060967d-136d-4023-8178-ad4b5270cd12\" (UID: \"d060967d-136d-4023-8178-ad4b5270cd12\") "
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.566850 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-utilities" (OuterVolumeSpecName: "utilities") pod "d060967d-136d-4023-8178-ad4b5270cd12" (UID: "d060967d-136d-4023-8178-ad4b5270cd12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.569017 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d060967d-136d-4023-8178-ad4b5270cd12-kube-api-access-r6849" (OuterVolumeSpecName: "kube-api-access-r6849") pod "d060967d-136d-4023-8178-ad4b5270cd12" (UID: "d060967d-136d-4023-8178-ad4b5270cd12"). InnerVolumeSpecName "kube-api-access-r6849". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.608600 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d060967d-136d-4023-8178-ad4b5270cd12" (UID: "d060967d-136d-4023-8178-ad4b5270cd12"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.669235 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.669276 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6849\" (UniqueName: \"kubernetes.io/projected/d060967d-136d-4023-8178-ad4b5270cd12-kube-api-access-r6849\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.669289 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d060967d-136d-4023-8178-ad4b5270cd12-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.698346 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dz9mp"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.698334 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dz9mp" event={"ID":"d060967d-136d-4023-8178-ad4b5270cd12","Type":"ContainerDied","Data":"53e6dff7739df21bb36e2b9b79e6874894fa993e8abedc4a992983db0c1907a0"}
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.698520 4926 scope.go:117] "RemoveContainer" containerID="eea0bcc1bd98ce8c809f081f0a13347e4805ba0f6a49be284153d3584caaef4c"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.701062 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6frbc" event={"ID":"130f3c89-e3ab-4b56-b499-7fa327ab5822","Type":"ContainerDied","Data":"6e854c75245fd33e05f114d003b903376d968b5f627d2a5e33a90530e822d13f"}
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.701127 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6frbc"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.712245 4926 scope.go:117] "RemoveContainer" containerID="c21306f06b9d7afe1859d80c8c35d043520f8aceea654ff91a682eb4cdb934a9"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.727958 4926 scope.go:117] "RemoveContainer" containerID="8806999c5fab47111e3c1d034b12e6a8bc509edb084280e5d24405cf40153886"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.735227 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dz9mp"]
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.741509 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dz9mp"]
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.746200 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6frbc"]
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.748566 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6frbc"]
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.750188 4926 scope.go:117] "RemoveContainer" containerID="e1de06e5faa32e2ca28de67b3595444bf54f71b72244e47ac349946b8a6fa4e0"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.886625 4926 scope.go:117] "RemoveContainer" containerID="b685188d113bd917c7ca28f4c542a1c9bee2890dcf81b99f841d192c0efb8828"
Nov 25 18:16:23 crc kubenswrapper[4926]: I1125 18:16:23.899516 4926 scope.go:117] "RemoveContainer" containerID="e66b87e267ebc08866a734ad21b1e9d4d61e96140746f2e06ffa8ad12069899a"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.336009 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" path="/var/lib/kubelet/pods/130f3c89-e3ab-4b56-b499-7fa327ab5822/volumes"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.337543 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d060967d-136d-4023-8178-ad4b5270cd12" path="/var/lib/kubelet/pods/d060967d-136d-4023-8178-ad4b5270cd12/volumes"
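[Editor's sketch] After the API objects are gone, the kubelet deletes each pod's volume directory under /var/lib/kubelet/pods/<podUID>/volumes and logs "Cleaned up orphaned pod volumes dir", as in the two entries above. A small read-only Go sketch that lists any such per-pod volume directories still on disk (the path layout is the one shown in the log; interpreting a non-empty directory as a leftover is an assumption for illustration):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Per-pod volume directories, as logged by kubelet_volumes.go above.
	matches, err := filepath.Glob("/var/lib/kubelet/pods/*/volumes")
	if err != nil {
		panic(err)
	}
	for _, dir := range matches {
		entries, err := os.ReadDir(dir)
		if err != nil {
			continue
		}
		// A non-empty volumes dir for an already-deleted pod would be an orphan.
		fmt.Printf("%s: %d volume plugin dirs\n", dir, len(entries))
	}
}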
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.655619 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.711445 4926 generic.go:334] "Generic (PLEG): container finished" podID="3e21465b-c285-4623-9566-f4998c280e16" containerID="2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9" exitCode=0
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.711499 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" event={"ID":"3e21465b-c285-4623-9566-f4998c280e16","Type":"ContainerDied","Data":"2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9"}
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.711525 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b" event={"ID":"3e21465b-c285-4623-9566-f4998c280e16","Type":"ContainerDied","Data":"fa25fa511c2350763f1a0722695d73884c9179b95e5d60ca7671f51f4de3bca1"}
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.711541 4926 scope.go:117] "RemoveContainer" containerID="2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.711605 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-w7m5b"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.714781 4926 generic.go:334] "Generic (PLEG): container finished" podID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerID="e9ce17ee17f45561d3c67a623b24e8e5e210c30c3d4829c719236ead4158fef6" exitCode=0
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.714827 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bqtsc" event={"ID":"f58da4a7-76aa-47fc-89d9-1da13a4fe67b","Type":"ContainerDied","Data":"e9ce17ee17f45561d3c67a623b24e8e5e210c30c3d4829c719236ead4158fef6"}
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.749588 4926 scope.go:117] "RemoveContainer" containerID="2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9"
Nov 25 18:16:24 crc kubenswrapper[4926]: E1125 18:16:24.750114 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9\": container with ID starting with 2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9 not found: ID does not exist" containerID="2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.750148 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9"} err="failed to get container status \"2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9\": rpc error: code = NotFound desc = could not find container \"2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9\": container with ID starting with 2c139433b4e273298e5e494bde7a261e0a0559535d54045415ab9b690ac906c9 not found: ID does not exist"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.750868 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bqtsc"
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788666 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-provider-selection\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788711 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfnzx\" (UniqueName: \"kubernetes.io/projected/3e21465b-c285-4623-9566-f4998c280e16-kube-api-access-rfnzx\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788745 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-service-ca\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788766 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-session\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788783 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-serving-cert\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788806 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-idp-0-file-data\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788866 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-cliconfig\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788887 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-router-certs\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788918 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-audit-policies\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788933 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-login\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788970 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-ocp-branding-template\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.788984 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-trusted-ca-bundle\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.789015 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e21465b-c285-4623-9566-f4998c280e16-audit-dir\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.789031 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-error\") pod \"3e21465b-c285-4623-9566-f4998c280e16\" (UID: \"3e21465b-c285-4623-9566-f4998c280e16\") "
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.790228 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.792768 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.793190 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.795404 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.795825 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.796318 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.797850 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.798149 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.798272 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e21465b-c285-4623-9566-f4998c280e16-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.799577 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.802675 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e21465b-c285-4623-9566-f4998c280e16-kube-api-access-rfnzx" (OuterVolumeSpecName: "kube-api-access-rfnzx") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "kube-api-access-rfnzx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.802808 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.802838 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "3e21465b-c285-4623-9566-f4998c280e16" (UID: "3e21465b-c285-4623-9566-f4998c280e16"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890193 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xw8v\" (UniqueName: \"kubernetes.io/projected/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-kube-api-access-8xw8v\") pod \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890287 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-catalog-content\") pod \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890317 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-utilities\") pod \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\" (UID: \"f58da4a7-76aa-47fc-89d9-1da13a4fe67b\") " Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890524 4926 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e21465b-c285-4623-9566-f4998c280e16-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890537 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890548 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890557 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfnzx\" (UniqueName: \"kubernetes.io/projected/3e21465b-c285-4623-9566-f4998c280e16-kube-api-access-rfnzx\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890568 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890576 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890585 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890595 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890605 4926 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890613 4926 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890623 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890632 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890641 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.890650 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e21465b-c285-4623-9566-f4998c280e16-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.891136 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-utilities" (OuterVolumeSpecName: "utilities") pod "f58da4a7-76aa-47fc-89d9-1da13a4fe67b" (UID: "f58da4a7-76aa-47fc-89d9-1da13a4fe67b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.894616 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-kube-api-access-8xw8v" (OuterVolumeSpecName: "kube-api-access-8xw8v") pod "f58da4a7-76aa-47fc-89d9-1da13a4fe67b" (UID: "f58da4a7-76aa-47fc-89d9-1da13a4fe67b"). InnerVolumeSpecName "kube-api-access-8xw8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.907348 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f58da4a7-76aa-47fc-89d9-1da13a4fe67b" (UID: "f58da4a7-76aa-47fc-89d9-1da13a4fe67b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.992060 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xw8v\" (UniqueName: \"kubernetes.io/projected/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-kube-api-access-8xw8v\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.992114 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:24 crc kubenswrapper[4926]: I1125 18:16:24.992128 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f58da4a7-76aa-47fc-89d9-1da13a4fe67b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.035784 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-w7m5b"] Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.039111 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-w7m5b"] Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.693388 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-54f75f9d4b-psdkj"] Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694174 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="extract-content" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694188 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="extract-content" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694201 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="extract-utilities" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694207 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="extract-utilities" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694218 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e21465b-c285-4623-9566-f4998c280e16" containerName="oauth-openshift" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694227 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e21465b-c285-4623-9566-f4998c280e16" containerName="oauth-openshift" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694241 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="extract-content" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694247 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="extract-content" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694256 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="extract-utilities" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694265 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="extract-utilities" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694275 4926 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694282 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694292 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694298 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694308 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="extract-utilities" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694314 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="extract-utilities" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694328 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52fed8b5-813f-4a7f-9f6d-31d525a0aa16" containerName="pruner" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694334 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52fed8b5-813f-4a7f-9f6d-31d525a0aa16" containerName="pruner" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694341 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694347 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: E1125 18:16:25.694354 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="extract-content" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694360 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="extract-content" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694562 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694583 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e21465b-c285-4623-9566-f4998c280e16" containerName="oauth-openshift" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694592 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="130f3c89-e3ab-4b56-b499-7fa327ab5822" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694600 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="52fed8b5-813f-4a7f-9f6d-31d525a0aa16" containerName="pruner" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.694611 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d060967d-136d-4023-8178-ad4b5270cd12" containerName="registry-server" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.695213 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.699296 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.699980 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700198 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700323 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700364 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700465 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700495 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700529 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700559 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.700680 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.701351 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.701817 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.707151 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.711948 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54f75f9d4b-psdkj"] Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.712748 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.722470 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.729270 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bqtsc" event={"ID":"f58da4a7-76aa-47fc-89d9-1da13a4fe67b","Type":"ContainerDied","Data":"962e5f89d67c0c20822acf30af36c883bcaa80490df297086b931ec6fed4a4aa"} Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.729353 4926 scope.go:117] "RemoveContainer" 
containerID="e9ce17ee17f45561d3c67a623b24e8e5e210c30c3d4829c719236ead4158fef6" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.729489 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bqtsc" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.754704 4926 scope.go:117] "RemoveContainer" containerID="3bd9e7d1977023c1707ff1e02a68f1b4a79bbe4b32c2ffeea9d5981a1e97e9cb" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.767927 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bqtsc"] Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.770392 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bqtsc"] Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.798012 4926 scope.go:117] "RemoveContainer" containerID="956a0113267575c0c32e90f91855fcc5edd970c931657d1bd59ee8f77c76cacd" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802222 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-session\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802260 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-login\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802289 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74f71347-fbc9-4712-9aa7-452aa49f481e-audit-dir\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802312 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4dt9\" (UniqueName: \"kubernetes.io/projected/74f71347-fbc9-4712-9aa7-452aa49f481e-kube-api-access-w4dt9\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802400 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802462 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-error\") pod 
\"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802487 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-audit-policies\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802547 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802568 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802583 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802602 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-router-certs\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802680 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802704 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-service-ca\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.802722 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903579 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903640 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-error\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-audit-policies\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903695 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903712 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903725 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903742 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-router-certs\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903775 4926 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903792 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-service-ca\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903807 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903832 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-session\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903849 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-login\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903864 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74f71347-fbc9-4712-9aa7-452aa49f481e-audit-dir\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.903883 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4dt9\" (UniqueName: \"kubernetes.io/projected/74f71347-fbc9-4712-9aa7-452aa49f481e-kube-api-access-w4dt9\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.905137 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74f71347-fbc9-4712-9aa7-452aa49f481e-audit-dir\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.905227 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-audit-policies\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.905562 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-service-ca\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.906048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.906534 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.908089 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-error\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.908504 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.909099 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.909221 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-login\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.909318 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-session\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.909564 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.909573 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-router-certs\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.910557 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/74f71347-fbc9-4712-9aa7-452aa49f481e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:25 crc kubenswrapper[4926]: I1125 18:16:25.918727 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4dt9\" (UniqueName: \"kubernetes.io/projected/74f71347-fbc9-4712-9aa7-452aa49f481e-kube-api-access-w4dt9\") pod \"oauth-openshift-54f75f9d4b-psdkj\" (UID: \"74f71347-fbc9-4712-9aa7-452aa49f481e\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:26 crc kubenswrapper[4926]: I1125 18:16:26.018270 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:26 crc kubenswrapper[4926]: I1125 18:16:26.335831 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e21465b-c285-4623-9566-f4998c280e16" path="/var/lib/kubelet/pods/3e21465b-c285-4623-9566-f4998c280e16/volumes" Nov 25 18:16:26 crc kubenswrapper[4926]: I1125 18:16:26.336393 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f58da4a7-76aa-47fc-89d9-1da13a4fe67b" path="/var/lib/kubelet/pods/f58da4a7-76aa-47fc-89d9-1da13a4fe67b/volumes" Nov 25 18:16:26 crc kubenswrapper[4926]: I1125 18:16:26.411690 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54f75f9d4b-psdkj"] Nov 25 18:16:26 crc kubenswrapper[4926]: W1125 18:16:26.412418 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74f71347_fbc9_4712_9aa7_452aa49f481e.slice/crio-5c7f517553d30322fc065faf18a360758fc9ea55bc384f40b2d03429083ff3db WatchSource:0}: Error finding container 5c7f517553d30322fc065faf18a360758fc9ea55bc384f40b2d03429083ff3db: Status 404 returned error can't find the container with id 5c7f517553d30322fc065faf18a360758fc9ea55bc384f40b2d03429083ff3db Nov 25 18:16:26 crc kubenswrapper[4926]: I1125 18:16:26.738346 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" event={"ID":"74f71347-fbc9-4712-9aa7-452aa49f481e","Type":"ContainerStarted","Data":"5c7f517553d30322fc065faf18a360758fc9ea55bc384f40b2d03429083ff3db"} Nov 25 18:16:27 crc kubenswrapper[4926]: I1125 18:16:27.745974 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" event={"ID":"74f71347-fbc9-4712-9aa7-452aa49f481e","Type":"ContainerStarted","Data":"fe4d30474a2d599a8ab1e227a49687d594ad304b4e9fde16aed17a10b7671a70"} Nov 25 18:16:27 crc kubenswrapper[4926]: I1125 18:16:27.746386 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:27 crc kubenswrapper[4926]: I1125 18:16:27.753067 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" Nov 25 18:16:27 crc kubenswrapper[4926]: I1125 18:16:27.769203 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-54f75f9d4b-psdkj" podStartSLOduration=29.769182132 podStartE2EDuration="29.769182132s" podCreationTimestamp="2025-11-25 18:15:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:16:27.765033761 +0000 UTC m=+218.150547386" watchObservedRunningTime="2025-11-25 18:16:27.769182132 +0000 UTC m=+218.154695737" Nov 25 18:16:30 crc kubenswrapper[4926]: I1125 18:16:30.125975 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w7nrb" Nov 25 18:16:31 crc kubenswrapper[4926]: I1125 18:16:31.528916 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2nnr2" Nov 25 18:16:31 crc kubenswrapper[4926]: I1125 18:16:31.565884 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2nnr2" Nov 25 18:16:33 crc 
kubenswrapper[4926]: I1125 18:16:33.541473 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.541843 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.541907 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.542384 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.542437 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1" gracePeriod=600 Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.764837 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2nnr2"] Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.765080 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2nnr2" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="registry-server" containerID="cri-o://87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94" gracePeriod=2 Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.785344 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1" exitCode=0 Nov 25 18:16:33 crc kubenswrapper[4926]: I1125 18:16:33.785421 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1"} Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.136000 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2nnr2" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.243284 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9kgz\" (UniqueName: \"kubernetes.io/projected/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-kube-api-access-k9kgz\") pod \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.243339 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-catalog-content\") pod \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.243413 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-utilities\") pod \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\" (UID: \"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4\") " Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.244461 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-utilities" (OuterVolumeSpecName: "utilities") pod "14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" (UID: "14fd9c00-7eb5-46d1-a42b-c3b79ff781a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.248892 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-kube-api-access-k9kgz" (OuterVolumeSpecName: "kube-api-access-k9kgz") pod "14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" (UID: "14fd9c00-7eb5-46d1-a42b-c3b79ff781a4"). InnerVolumeSpecName "kube-api-access-k9kgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.323361 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" (UID: "14fd9c00-7eb5-46d1-a42b-c3b79ff781a4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.344356 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.344401 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.344417 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9kgz\" (UniqueName: \"kubernetes.io/projected/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4-kube-api-access-k9kgz\") on node \"crc\" DevicePath \"\"" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.791984 4926 generic.go:334] "Generic (PLEG): container finished" podID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerID="87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94" exitCode=0 Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.792064 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerDied","Data":"87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94"} Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.792105 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nnr2" event={"ID":"14fd9c00-7eb5-46d1-a42b-c3b79ff781a4","Type":"ContainerDied","Data":"bae3fabae3814e73f350b03ed9b94dc98908dab978ea8f3bd9e1bc05eacc6d45"} Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.792103 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2nnr2" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.792164 4926 scope.go:117] "RemoveContainer" containerID="87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.793963 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"2605307c38a76c7ce73c047e48783a7fd79aada84e3b313e0f1c6214104df2a9"} Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.809908 4926 scope.go:117] "RemoveContainer" containerID="24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.832414 4926 scope.go:117] "RemoveContainer" containerID="a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.837007 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2nnr2"] Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.841498 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2nnr2"] Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.854720 4926 scope.go:117] "RemoveContainer" containerID="87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94" Nov 25 18:16:34 crc kubenswrapper[4926]: E1125 18:16:34.855842 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94\": container with ID starting with 87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94 not found: ID does not exist" containerID="87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.855884 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94"} err="failed to get container status \"87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94\": rpc error: code = NotFound desc = could not find container \"87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94\": container with ID starting with 87819f29f3f2d343635d5a2e11862994a9523a1b0891bc3bab1fe4721a9d8b94 not found: ID does not exist" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.855911 4926 scope.go:117] "RemoveContainer" containerID="24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63" Nov 25 18:16:34 crc kubenswrapper[4926]: E1125 18:16:34.856222 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63\": container with ID starting with 24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63 not found: ID does not exist" containerID="24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.856247 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63"} err="failed to get container status \"24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63\": rpc error: code = NotFound desc = could not 
find container \"24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63\": container with ID starting with 24d086aaee61bd04d1300ba0c4a41f8f09fbfa5e6b7cc1ed605a66f432e4ae63 not found: ID does not exist" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.856263 4926 scope.go:117] "RemoveContainer" containerID="a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf" Nov 25 18:16:34 crc kubenswrapper[4926]: E1125 18:16:34.856540 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf\": container with ID starting with a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf not found: ID does not exist" containerID="a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf" Nov 25 18:16:34 crc kubenswrapper[4926]: I1125 18:16:34.856581 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf"} err="failed to get container status \"a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf\": rpc error: code = NotFound desc = could not find container \"a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf\": container with ID starting with a54b595ba86f2a82cd3aa0139701d1aebab7e817d1074a51b48d28f9db9cbebf not found: ID does not exist" Nov 25 18:16:36 crc kubenswrapper[4926]: I1125 18:16:36.336500 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" path="/var/lib/kubelet/pods/14fd9c00-7eb5-46d1-a42b-c3b79ff781a4/volumes" Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.564690 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mnpbw"] Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.565852 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mnpbw" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="registry-server" containerID="cri-o://e62d11e788a849850d5bbca989e1962e61d4a3dd1cffa52c13f5b52262f2ad1f" gracePeriod=30 Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.571917 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-skkd4"] Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.572147 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-skkd4" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="registry-server" containerID="cri-o://83f65c7e7a728dbebd9cf6b964d15a81b78a098ec19ef2eab51427dc13f31038" gracePeriod=30 Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.580024 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m6qg7"] Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.580279 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerName="marketplace-operator" containerID="cri-o://3b259915f2d0cb1192c24f68f34b665fc3982b67a6b747f7269c9940563312de" gracePeriod=30 Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.588459 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7nrb"] Nov 25 18:16:45 crc 
kubenswrapper[4926]: I1125 18:16:45.588713 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-w7nrb" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="registry-server" containerID="cri-o://2eae1527b148cfc81f6aafbaa0b0fdacc26c03d1358dcab154f5f23394a2f46e" gracePeriod=30 Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.602529 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5xksb"] Nov 25 18:16:45 crc kubenswrapper[4926]: E1125 18:16:45.602916 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="extract-utilities" Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.602939 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="extract-utilities" Nov 25 18:16:45 crc kubenswrapper[4926]: E1125 18:16:45.602950 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="registry-server" Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.602959 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="registry-server" Nov 25 18:16:45 crc kubenswrapper[4926]: E1125 18:16:45.602974 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="extract-content" Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.602982 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="extract-content" Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.603100 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="14fd9c00-7eb5-46d1-a42b-c3b79ff781a4" containerName="registry-server" Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.603681 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.606540 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lcpfw"]
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.606835 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lcpfw" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="registry-server" containerID="cri-o://41fed66acb251c07aa6e4e1ec0024994c25fe77fbef15ae1b0345518191cfc8f" gracePeriod=30
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.618394 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5xksb"]
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.690222 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q46pm\" (UniqueName: \"kubernetes.io/projected/679c6a97-f755-4e07-8d02-13b4ab9616d1-kube-api-access-q46pm\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.690456 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/679c6a97-f755-4e07-8d02-13b4ab9616d1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.690646 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/679c6a97-f755-4e07-8d02-13b4ab9616d1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.791845 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/679c6a97-f755-4e07-8d02-13b4ab9616d1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.791912 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/679c6a97-f755-4e07-8d02-13b4ab9616d1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.791938 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q46pm\" (UniqueName: \"kubernetes.io/projected/679c6a97-f755-4e07-8d02-13b4ab9616d1-kube-api-access-q46pm\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.793453 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/679c6a97-f755-4e07-8d02-13b4ab9616d1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.802552 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/679c6a97-f755-4e07-8d02-13b4ab9616d1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.808115 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q46pm\" (UniqueName: \"kubernetes.io/projected/679c6a97-f755-4e07-8d02-13b4ab9616d1-kube-api-access-q46pm\") pod \"marketplace-operator-79b997595-5xksb\" (UID: \"679c6a97-f755-4e07-8d02-13b4ab9616d1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.861460 4926 generic.go:334] "Generic (PLEG): container finished" podID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerID="2eae1527b148cfc81f6aafbaa0b0fdacc26c03d1358dcab154f5f23394a2f46e" exitCode=0
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.861536 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7nrb" event={"ID":"b6ea68a1-b0ef-4035-9466-643ca03fc8a6","Type":"ContainerDied","Data":"2eae1527b148cfc81f6aafbaa0b0fdacc26c03d1358dcab154f5f23394a2f46e"}
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.863751 4926 generic.go:334] "Generic (PLEG): container finished" podID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerID="e62d11e788a849850d5bbca989e1962e61d4a3dd1cffa52c13f5b52262f2ad1f" exitCode=0
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.863962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mnpbw" event={"ID":"fb1af6de-8249-426c-a3ab-2ba9e009c1d7","Type":"ContainerDied","Data":"e62d11e788a849850d5bbca989e1962e61d4a3dd1cffa52c13f5b52262f2ad1f"}
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.865617 4926 generic.go:334] "Generic (PLEG): container finished" podID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerID="3b259915f2d0cb1192c24f68f34b665fc3982b67a6b747f7269c9940563312de" exitCode=0
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.865711 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" event={"ID":"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f","Type":"ContainerDied","Data":"3b259915f2d0cb1192c24f68f34b665fc3982b67a6b747f7269c9940563312de"}
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.867680 4926 generic.go:334] "Generic (PLEG): container finished" podID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerID="83f65c7e7a728dbebd9cf6b964d15a81b78a098ec19ef2eab51427dc13f31038" exitCode=0
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.867729 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skkd4" event={"ID":"929a2b2c-f4a1-47b4-ab76-327d9b68b730","Type":"ContainerDied","Data":"83f65c7e7a728dbebd9cf6b964d15a81b78a098ec19ef2eab51427dc13f31038"}
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.869849 4926 generic.go:334] "Generic (PLEG): container finished" podID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerID="41fed66acb251c07aa6e4e1ec0024994c25fe77fbef15ae1b0345518191cfc8f" exitCode=0
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.869909 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerDied","Data":"41fed66acb251c07aa6e4e1ec0024994c25fe77fbef15ae1b0345518191cfc8f"}
Nov 25 18:16:45 crc kubenswrapper[4926]: I1125 18:16:45.966165 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.384752 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5xksb"]
Nov 25 18:16:46 crc kubenswrapper[4926]: W1125 18:16:46.427657 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod679c6a97_f755_4e07_8d02_13b4ab9616d1.slice/crio-4c1ca6e1b64f1ce3e17e5fbe75a10efb03a2823cc71073bef36e5338ef02032a WatchSource:0}: Error finding container 4c1ca6e1b64f1ce3e17e5fbe75a10efb03a2823cc71073bef36e5338ef02032a: Status 404 returned error can't find the container with id 4c1ca6e1b64f1ce3e17e5fbe75a10efb03a2823cc71073bef36e5338ef02032a
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.428861 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.543843 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.565578 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mnpbw"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.570519 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.600559 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.603350 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64msz\" (UniqueName: \"kubernetes.io/projected/929a2b2c-f4a1-47b4-ab76-327d9b68b730-kube-api-access-64msz\") pod \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.603436 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-catalog-content\") pod \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.603593 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-utilities\") pod \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\" (UID: \"929a2b2c-f4a1-47b4-ab76-327d9b68b730\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.605450 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-utilities" (OuterVolumeSpecName: "utilities") pod "929a2b2c-f4a1-47b4-ab76-327d9b68b730" (UID: "929a2b2c-f4a1-47b4-ab76-327d9b68b730"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.611521 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/929a2b2c-f4a1-47b4-ab76-327d9b68b730-kube-api-access-64msz" (OuterVolumeSpecName: "kube-api-access-64msz") pod "929a2b2c-f4a1-47b4-ab76-327d9b68b730" (UID: "929a2b2c-f4a1-47b4-ab76-327d9b68b730"). InnerVolumeSpecName "kube-api-access-64msz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.659174 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "929a2b2c-f4a1-47b4-ab76-327d9b68b730" (UID: "929a2b2c-f4a1-47b4-ab76-327d9b68b730"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705222 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-catalog-content\") pod \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705282 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87gpn\" (UniqueName: \"kubernetes.io/projected/331da98f-d117-4363-bc17-2bbd84a7f7d7-kube-api-access-87gpn\") pod \"331da98f-d117-4363-bc17-2bbd84a7f7d7\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705339 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-catalog-content\") pod \"331da98f-d117-4363-bc17-2bbd84a7f7d7\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705362 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-operator-metrics\") pod \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705405 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcbt9\" (UniqueName: \"kubernetes.io/projected/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-kube-api-access-zcbt9\") pod \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705420 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-catalog-content\") pod \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705444 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-utilities\") pod \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705468 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kwc2\" (UniqueName: \"kubernetes.io/projected/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-kube-api-access-7kwc2\") pod \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\" (UID: \"b6ea68a1-b0ef-4035-9466-643ca03fc8a6\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705487 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdljv\" (UniqueName: \"kubernetes.io/projected/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-kube-api-access-mdljv\") pod \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705518 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-utilities\") pod \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\" (UID: \"fb1af6de-8249-426c-a3ab-2ba9e009c1d7\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705545 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-utilities\") pod \"331da98f-d117-4363-bc17-2bbd84a7f7d7\" (UID: \"331da98f-d117-4363-bc17-2bbd84a7f7d7\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.705578 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca\") pod \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\" (UID: \"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f\") "
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.707240 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64msz\" (UniqueName: \"kubernetes.io/projected/929a2b2c-f4a1-47b4-ab76-327d9b68b730-kube-api-access-64msz\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.707281 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.707340 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/929a2b2c-f4a1-47b4-ab76-327d9b68b730-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.707319 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-utilities" (OuterVolumeSpecName: "utilities") pod "fb1af6de-8249-426c-a3ab-2ba9e009c1d7" (UID: "fb1af6de-8249-426c-a3ab-2ba9e009c1d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.707805 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-utilities" (OuterVolumeSpecName: "utilities") pod "b6ea68a1-b0ef-4035-9466-643ca03fc8a6" (UID: "b6ea68a1-b0ef-4035-9466-643ca03fc8a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.709056 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/331da98f-d117-4363-bc17-2bbd84a7f7d7-kube-api-access-87gpn" (OuterVolumeSpecName: "kube-api-access-87gpn") pod "331da98f-d117-4363-bc17-2bbd84a7f7d7" (UID: "331da98f-d117-4363-bc17-2bbd84a7f7d7"). InnerVolumeSpecName "kube-api-access-87gpn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.710165 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" (UID: "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.711033 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-utilities" (OuterVolumeSpecName: "utilities") pod "331da98f-d117-4363-bc17-2bbd84a7f7d7" (UID: "331da98f-d117-4363-bc17-2bbd84a7f7d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.727880 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" (UID: "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.727983 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-kube-api-access-mdljv" (OuterVolumeSpecName: "kube-api-access-mdljv") pod "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" (UID: "1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f"). InnerVolumeSpecName "kube-api-access-mdljv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.729011 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-kube-api-access-7kwc2" (OuterVolumeSpecName: "kube-api-access-7kwc2") pod "b6ea68a1-b0ef-4035-9466-643ca03fc8a6" (UID: "b6ea68a1-b0ef-4035-9466-643ca03fc8a6"). InnerVolumeSpecName "kube-api-access-7kwc2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.730794 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-kube-api-access-zcbt9" (OuterVolumeSpecName: "kube-api-access-zcbt9") pod "fb1af6de-8249-426c-a3ab-2ba9e009c1d7" (UID: "fb1af6de-8249-426c-a3ab-2ba9e009c1d7"). InnerVolumeSpecName "kube-api-access-zcbt9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.744119 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b6ea68a1-b0ef-4035-9466-643ca03fc8a6" (UID: "b6ea68a1-b0ef-4035-9466-643ca03fc8a6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.765694 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb1af6de-8249-426c-a3ab-2ba9e009c1d7" (UID: "fb1af6de-8249-426c-a3ab-2ba9e009c1d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808299 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808333 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808343 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87gpn\" (UniqueName: \"kubernetes.io/projected/331da98f-d117-4363-bc17-2bbd84a7f7d7-kube-api-access-87gpn\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808351 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808360 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808387 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcbt9\" (UniqueName: \"kubernetes.io/projected/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-kube-api-access-zcbt9\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808399 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808410 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kwc2\" (UniqueName: \"kubernetes.io/projected/b6ea68a1-b0ef-4035-9466-643ca03fc8a6-kube-api-access-7kwc2\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808419 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdljv\" (UniqueName: \"kubernetes.io/projected/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f-kube-api-access-mdljv\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808429 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb1af6de-8249-426c-a3ab-2ba9e009c1d7-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.808438 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.815257 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "331da98f-d117-4363-bc17-2bbd84a7f7d7" (UID: "331da98f-d117-4363-bc17-2bbd84a7f7d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.877260 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.877440 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m6qg7" event={"ID":"1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f","Type":"ContainerDied","Data":"e60d52f5425e271f3ed7aebeacce824d322c0976601290d627e85be4a440e43f"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.877801 4926 scope.go:117] "RemoveContainer" containerID="3b259915f2d0cb1192c24f68f34b665fc3982b67a6b747f7269c9940563312de"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.883133 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skkd4" event={"ID":"929a2b2c-f4a1-47b4-ab76-327d9b68b730","Type":"ContainerDied","Data":"1d774139cebea438e009e9a0d8eb415481f77376332565f60e059917a304e259"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.883284 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-skkd4"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.888307 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lcpfw" event={"ID":"331da98f-d117-4363-bc17-2bbd84a7f7d7","Type":"ContainerDied","Data":"a9844d9b2a9bbc389173494ca88bc6fd793ff1c56a80cf55778d47f6e685699d"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.888758 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lcpfw"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.894893 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7nrb" event={"ID":"b6ea68a1-b0ef-4035-9466-643ca03fc8a6","Type":"ContainerDied","Data":"f9c3bd4728639144a6e1fe924af0370c48d4921a22ed5eb5cae017095074b102"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.895098 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7nrb"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.896506 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb" event={"ID":"679c6a97-f755-4e07-8d02-13b4ab9616d1","Type":"ContainerStarted","Data":"d97095f06a2cdd8730b0c6ae37790c1400f0f6f99493452bcf961b7defae0d67"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.896532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb" event={"ID":"679c6a97-f755-4e07-8d02-13b4ab9616d1","Type":"ContainerStarted","Data":"4c1ca6e1b64f1ce3e17e5fbe75a10efb03a2823cc71073bef36e5338ef02032a"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.896764 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.899260 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mnpbw" event={"ID":"fb1af6de-8249-426c-a3ab-2ba9e009c1d7","Type":"ContainerDied","Data":"86bfbf2e2503a897efeabf46c6b9f87b344ec27425307a878bfc3f58c57a31e0"}
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.899447 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mnpbw"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.900005 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5xksb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" start-of-body=
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.900038 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb" podUID="679c6a97-f755-4e07-8d02-13b4ab9616d1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.901485 4926 scope.go:117] "RemoveContainer" containerID="83f65c7e7a728dbebd9cf6b964d15a81b78a098ec19ef2eab51427dc13f31038"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.910107 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331da98f-d117-4363-bc17-2bbd84a7f7d7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.916782 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb" podStartSLOduration=1.916760689 podStartE2EDuration="1.916760689s" podCreationTimestamp="2025-11-25 18:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:16:46.916123652 +0000 UTC m=+237.301637277" watchObservedRunningTime="2025-11-25 18:16:46.916760689 +0000 UTC m=+237.302274294"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.934631 4926 scope.go:117] "RemoveContainer" containerID="77390ca48c916fd0d5568943ecfc645710126d7ebeee5286305636a34be927d7"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.943618 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m6qg7"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.947869 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m6qg7"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.966645 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lcpfw"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.968881 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lcpfw"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.973844 4926 scope.go:117] "RemoveContainer" containerID="7ca0e9f1dc8bbea4c046cbecbc169796a9018a1b0592f047e6fdf62c8a30a7ee"
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.976499 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-skkd4"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.981228 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-skkd4"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.986472 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7nrb"]
Nov 25 18:16:46 crc kubenswrapper[4926]: I1125 18:16:46.994573 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7nrb"]
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.003310 4926 scope.go:117] "RemoveContainer" containerID="41fed66acb251c07aa6e4e1ec0024994c25fe77fbef15ae1b0345518191cfc8f"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.013020 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mnpbw"]
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.021163 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mnpbw"]
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.025583 4926 scope.go:117] "RemoveContainer" containerID="1475ebca088901738d407ff04318b352102da56e7222aa58e7605afb59aa635d"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.047184 4926 scope.go:117] "RemoveContainer" containerID="b80daf079cc3632d5a677a4cf5092c2bb5227dc106d31af0c4bf429705ffdc93"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.063549 4926 scope.go:117] "RemoveContainer" containerID="2eae1527b148cfc81f6aafbaa0b0fdacc26c03d1358dcab154f5f23394a2f46e"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.079041 4926 scope.go:117] "RemoveContainer" containerID="53a8504f4b080f00a2a3ed89f8045ed1fcbf5bed84900e5a5efbdbb5c1fddbf0"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.098497 4926 scope.go:117] "RemoveContainer" containerID="9896705a879e5839ec9a22855fe829e66b12986c5d41e0808604cbb0b5c2fbdc"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.114573 4926 scope.go:117] "RemoveContainer" containerID="e62d11e788a849850d5bbca989e1962e61d4a3dd1cffa52c13f5b52262f2ad1f"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.130045 4926 scope.go:117] "RemoveContainer" containerID="0426ca90be1fe025e5171f6601e3ad00b1ba989cbe71bf665198b80b5bf9540f"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.144646 4926 scope.go:117] "RemoveContainer" containerID="143589e273001538e55794f4cd8d17788398a7c3336ce0b6ca9afb4e6caae2bd"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784010 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5j4h6"]
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784845 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784861 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784873 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784879 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784888 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784895 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784912 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784921 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784931 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784938 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784947 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784955 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784963 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerName="marketplace-operator"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784970 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerName="marketplace-operator"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784981 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.784986 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.784996 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785002 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.785010 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785015 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.785024 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785030 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="extract-content"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.785038 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785046 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="extract-utilities"
Nov 25 18:16:47 crc kubenswrapper[4926]: E1125 18:16:47.785057 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785063 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785157 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785166 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785174 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785185 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" containerName="registry-server"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.785194 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" containerName="marketplace-operator"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.786037 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.794322 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.794872 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5j4h6"]
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.916532 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-5xksb"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.922897 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xld86\" (UniqueName: \"kubernetes.io/projected/757453db-5fee-408c-a5a1-2214d225129d-kube-api-access-xld86\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.922935 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/757453db-5fee-408c-a5a1-2214d225129d-utilities\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.922955 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/757453db-5fee-408c-a5a1-2214d225129d-catalog-content\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.983716 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gxcdm"]
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.985264 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.987535 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 18:16:47 crc kubenswrapper[4926]: I1125 18:16:47.994503 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gxcdm"]
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.024353 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/757453db-5fee-408c-a5a1-2214d225129d-utilities\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.024423 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/757453db-5fee-408c-a5a1-2214d225129d-catalog-content\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.024533 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xld86\" (UniqueName: \"kubernetes.io/projected/757453db-5fee-408c-a5a1-2214d225129d-kube-api-access-xld86\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.025642 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/757453db-5fee-408c-a5a1-2214d225129d-utilities\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.025710 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/757453db-5fee-408c-a5a1-2214d225129d-catalog-content\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.043658 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xld86\" (UniqueName: \"kubernetes.io/projected/757453db-5fee-408c-a5a1-2214d225129d-kube-api-access-xld86\") pod \"redhat-marketplace-5j4h6\" (UID: \"757453db-5fee-408c-a5a1-2214d225129d\") " pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.107736 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5j4h6"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.126672 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmdvs\" (UniqueName: \"kubernetes.io/projected/75a6defb-2e36-42a9-85b0-6913304b59a6-kube-api-access-lmdvs\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.126761 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-utilities\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.126838 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-catalog-content\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.228284 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmdvs\" (UniqueName: \"kubernetes.io/projected/75a6defb-2e36-42a9-85b0-6913304b59a6-kube-api-access-lmdvs\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.229979 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-utilities\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.230914 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-utilities\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.231066 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-catalog-content\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.231330 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-catalog-content\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.248909 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmdvs\" (UniqueName: \"kubernetes.io/projected/75a6defb-2e36-42a9-85b0-6913304b59a6-kube-api-access-lmdvs\") pod \"community-operators-gxcdm\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") " pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.304162 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.307773 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5j4h6"]
Nov 25 18:16:48 crc kubenswrapper[4926]: W1125 18:16:48.327651 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod757453db_5fee_408c_a5a1_2214d225129d.slice/crio-25662f08f1a94f6ebcd396791ddc5535fd92db1b8dd856c32f1ad947d82fd1d9 WatchSource:0}: Error finding container 25662f08f1a94f6ebcd396791ddc5535fd92db1b8dd856c32f1ad947d82fd1d9: Status 404 returned error can't find the container with id 25662f08f1a94f6ebcd396791ddc5535fd92db1b8dd856c32f1ad947d82fd1d9
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.337571 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f" path="/var/lib/kubelet/pods/1c7cd6d3-0f98-48c6-bdfa-30aa7c3c726f/volumes"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.338506 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="331da98f-d117-4363-bc17-2bbd84a7f7d7" path="/var/lib/kubelet/pods/331da98f-d117-4363-bc17-2bbd84a7f7d7/volumes"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.339090 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="929a2b2c-f4a1-47b4-ab76-327d9b68b730" path="/var/lib/kubelet/pods/929a2b2c-f4a1-47b4-ab76-327d9b68b730/volumes"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.340212 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6ea68a1-b0ef-4035-9466-643ca03fc8a6" path="/var/lib/kubelet/pods/b6ea68a1-b0ef-4035-9466-643ca03fc8a6/volumes"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.340899 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb1af6de-8249-426c-a3ab-2ba9e009c1d7" path="/var/lib/kubelet/pods/fb1af6de-8249-426c-a3ab-2ba9e009c1d7/volumes"
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.536754 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gxcdm"]
Nov 25 18:16:48 crc kubenswrapper[4926]: W1125 18:16:48.597718 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75a6defb_2e36_42a9_85b0_6913304b59a6.slice/crio-5d53954aa1e30c9e7134a042a8542a65d7eb5dd8fbedd3383855629e14f6cdb0 WatchSource:0}: Error finding container 5d53954aa1e30c9e7134a042a8542a65d7eb5dd8fbedd3383855629e14f6cdb0: Status 404 returned error can't find the container with id 5d53954aa1e30c9e7134a042a8542a65d7eb5dd8fbedd3383855629e14f6cdb0
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.919953 4926 generic.go:334] "Generic (PLEG): container finished" podID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerID="4749c79461bc0cae1eef58404ad0abbbe08652c192b3a1e093f3527f6ebae264" exitCode=0
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.920031 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerDied","Data":"4749c79461bc0cae1eef58404ad0abbbe08652c192b3a1e093f3527f6ebae264"}
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.920136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerStarted","Data":"5d53954aa1e30c9e7134a042a8542a65d7eb5dd8fbedd3383855629e14f6cdb0"}
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.926581 4926 generic.go:334] "Generic (PLEG): container finished" podID="757453db-5fee-408c-a5a1-2214d225129d" containerID="8165ad31bf5960f66eaaaed5e216829dfdbff131aa355dc067528826a6f9c04b" exitCode=0
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.927836 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5j4h6" event={"ID":"757453db-5fee-408c-a5a1-2214d225129d","Type":"ContainerDied","Data":"8165ad31bf5960f66eaaaed5e216829dfdbff131aa355dc067528826a6f9c04b"}
Nov 25 18:16:48 crc kubenswrapper[4926]: I1125 18:16:48.927936 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5j4h6" event={"ID":"757453db-5fee-408c-a5a1-2214d225129d","Type":"ContainerStarted","Data":"25662f08f1a94f6ebcd396791ddc5535fd92db1b8dd856c32f1ad947d82fd1d9"}
Nov 25 18:16:49 crc kubenswrapper[4926]: I1125 18:16:49.934050 4926 generic.go:334] "Generic (PLEG): container finished" podID="757453db-5fee-408c-a5a1-2214d225129d" containerID="4b1cfe720718cf5e7109550af1ae5012e0b0271eb7abe3ea7ac53db15ff5b001" exitCode=0
Nov 25 18:16:49 crc kubenswrapper[4926]: I1125 18:16:49.934588 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5j4h6" event={"ID":"757453db-5fee-408c-a5a1-2214d225129d","Type":"ContainerDied","Data":"4b1cfe720718cf5e7109550af1ae5012e0b0271eb7abe3ea7ac53db15ff5b001"}
Nov 25 18:16:49 crc kubenswrapper[4926]: I1125 18:16:49.938433 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerStarted","Data":"6c6e11ec136a8fbbacb3561466a9c6b9ef565b10b3833a821ac12cd9b8313b22"}
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.185119 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2xrjj"]
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.186352 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.190704 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.194828 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xrjj"]
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.369983 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-catalog-content\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.371168 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-utilities\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.371359 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbzxh\" (UniqueName: \"kubernetes.io/projected/3db52cdf-6c82-47a4-abf5-120741331680-kube-api-access-rbzxh\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.389757 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5xqw8"]
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.391098 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.392710 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5xqw8"]
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.394434 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.473003 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-catalog-content\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.473331 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-utilities\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.473517 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbzxh\" (UniqueName: \"kubernetes.io/projected/3db52cdf-6c82-47a4-abf5-120741331680-kube-api-access-rbzxh\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.473797 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-catalog-content\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.473825 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-utilities\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.494659 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbzxh\" (UniqueName: \"kubernetes.io/projected/3db52cdf-6c82-47a4-abf5-120741331680-kube-api-access-rbzxh\") pod \"redhat-operators-2xrjj\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.574953 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc09add5-d326-40d3-9dce-cf8d6ca18360-utilities\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.575006 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw2c5\" (UniqueName: \"kubernetes.io/projected/bc09add5-d326-40d3-9dce-cf8d6ca18360-kube-api-access-nw2c5\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.575052 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc09add5-d326-40d3-9dce-cf8d6ca18360-catalog-content\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.617681 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.626443 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xrjj"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.675809 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc09add5-d326-40d3-9dce-cf8d6ca18360-utilities\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.675867 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw2c5\" (UniqueName: \"kubernetes.io/projected/bc09add5-d326-40d3-9dce-cf8d6ca18360-kube-api-access-nw2c5\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.675914 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc09add5-d326-40d3-9dce-cf8d6ca18360-catalog-content\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.677148 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc09add5-d326-40d3-9dce-cf8d6ca18360-catalog-content\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.677840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc09add5-d326-40d3-9dce-cf8d6ca18360-utilities\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.695731 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw2c5\" (UniqueName: \"kubernetes.io/projected/bc09add5-d326-40d3-9dce-cf8d6ca18360-kube-api-access-nw2c5\") pod \"certified-operators-5xqw8\" (UID: \"bc09add5-d326-40d3-9dce-cf8d6ca18360\") " pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.727161 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5xqw8"
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.950358 4926 generic.go:334] "Generic (PLEG): container finished" podID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerID="6c6e11ec136a8fbbacb3561466a9c6b9ef565b10b3833a821ac12cd9b8313b22" exitCode=0
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.950920 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerDied","Data":"6c6e11ec136a8fbbacb3561466a9c6b9ef565b10b3833a821ac12cd9b8313b22"}
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.958635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5j4h6" event={"ID":"757453db-5fee-408c-a5a1-2214d225129d","Type":"ContainerStarted","Data":"a57e7b707632e1691e58fa8c54428bc3e6035bd7076af9d4f3d895e6187e4e6d"}
Nov 25 18:16:50 crc kubenswrapper[4926]: I1125 18:16:50.992624 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5j4h6" podStartSLOduration=2.529027974 podStartE2EDuration="3.991922484s" podCreationTimestamp="2025-11-25 18:16:47 +0000 UTC" firstStartedPulling="2025-11-25 18:16:48.930011657 +0000 UTC m=+239.315525262" lastFinishedPulling="2025-11-25 18:16:50.392906167 +0000 UTC m=+240.778419772" observedRunningTime="2025-11-25 18:16:50.988286595 +0000 UTC m=+241.373800210" watchObservedRunningTime="2025-11-25 18:16:50.991922484 +0000 UTC m=+241.377436099"
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.092134 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xrjj"]
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.163120 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5xqw8"]
Nov 25 18:16:51 crc kubenswrapper[4926]: W1125 18:16:51.180061 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc09add5_d326_40d3_9dce_cf8d6ca18360.slice/crio-156e04c47e893540b94b95776acd9662451c7c7e170c47ce281a82a4e73a9c37 WatchSource:0}: Error finding container 156e04c47e893540b94b95776acd9662451c7c7e170c47ce281a82a4e73a9c37: Status 404 returned error can't find the container with id 156e04c47e893540b94b95776acd9662451c7c7e170c47ce281a82a4e73a9c37
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.966003 4926 generic.go:334] "Generic (PLEG): container finished" podID="bc09add5-d326-40d3-9dce-cf8d6ca18360" containerID="e185cd215be7677888b31a1494a8fc5e3cf6ceb22987a1442a51b8f87f3646e7" exitCode=0
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.966094 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5xqw8" event={"ID":"bc09add5-d326-40d3-9dce-cf8d6ca18360","Type":"ContainerDied","Data":"e185cd215be7677888b31a1494a8fc5e3cf6ceb22987a1442a51b8f87f3646e7"}
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.966664 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5xqw8" event={"ID":"bc09add5-d326-40d3-9dce-cf8d6ca18360","Type":"ContainerStarted","Data":"156e04c47e893540b94b95776acd9662451c7c7e170c47ce281a82a4e73a9c37"}
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.970720 4926 generic.go:334] "Generic (PLEG): container finished" podID="3db52cdf-6c82-47a4-abf5-120741331680" containerID="6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6" exitCode=0
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.970827 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerDied","Data":"6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6"}
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.970876 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerStarted","Data":"8331301e92799b977a34075833c5c70d25fe21ccc4ed82aa8d47dad9ec6a5ae6"}
Nov 25 18:16:51 crc kubenswrapper[4926]: I1125 18:16:51.976655 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerStarted","Data":"b966e160527e1120cc67cf52430e5910b04c0c6622546b2cc303693448d8bc3b"}
Nov 25 18:16:52 crc kubenswrapper[4926]: I1125 18:16:52.030723 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gxcdm" podStartSLOduration=2.606856779 podStartE2EDuration="5.030703333s" podCreationTimestamp="2025-11-25 18:16:47 +0000 UTC" firstStartedPulling="2025-11-25 18:16:48.922567177 +0000 UTC m=+239.308080792" lastFinishedPulling="2025-11-25 18:16:51.346413751 +0000 UTC m=+241.731927346" observedRunningTime="2025-11-25 18:16:52.030010404 +0000 UTC m=+242.415524009" watchObservedRunningTime="2025-11-25 18:16:52.030703333 +0000 UTC m=+242.416216938"
Nov 25 18:16:52 crc kubenswrapper[4926]: I1125 18:16:52.985425 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerStarted","Data":"87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63"}
Nov 25 18:16:52 crc kubenswrapper[4926]: I1125 18:16:52.989338 4926 generic.go:334] "Generic (PLEG): container finished" podID="bc09add5-d326-40d3-9dce-cf8d6ca18360" containerID="9ad84807b13ab568df3d5330a361ff2c8d92b93a36836721542d8793eb811b91" exitCode=0
Nov 25 18:16:52 crc kubenswrapper[4926]: I1125 18:16:52.990629 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5xqw8" event={"ID":"bc09add5-d326-40d3-9dce-cf8d6ca18360","Type":"ContainerDied","Data":"9ad84807b13ab568df3d5330a361ff2c8d92b93a36836721542d8793eb811b91"}
Nov 25 18:16:53 crc kubenswrapper[4926]: I1125 18:16:53.997875 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5xqw8" event={"ID":"bc09add5-d326-40d3-9dce-cf8d6ca18360","Type":"ContainerStarted","Data":"b4e3d6574410a4a0dcfe20e56b24017acfdf388cc69f0a952f1d484ca9a4eca0"}
Nov 25 18:16:53 crc kubenswrapper[4926]: I1125 18:16:53.999521 4926 generic.go:334] "Generic (PLEG): container finished" podID="3db52cdf-6c82-47a4-abf5-120741331680" containerID="87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63" exitCode=0
Nov 25 18:16:53 crc kubenswrapper[4926]: I1125 18:16:53.999560 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerDied","Data":"87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63"}
Nov 25 18:16:54 crc
kubenswrapper[4926]: I1125 18:16:54.034874 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5xqw8" podStartSLOduration=2.576337063 podStartE2EDuration="4.034856376s" podCreationTimestamp="2025-11-25 18:16:50 +0000 UTC" firstStartedPulling="2025-11-25 18:16:51.968423247 +0000 UTC m=+242.353936852" lastFinishedPulling="2025-11-25 18:16:53.42694256 +0000 UTC m=+243.812456165" observedRunningTime="2025-11-25 18:16:54.015288249 +0000 UTC m=+244.400801854" watchObservedRunningTime="2025-11-25 18:16:54.034856376 +0000 UTC m=+244.420369981" Nov 25 18:16:55 crc kubenswrapper[4926]: I1125 18:16:55.008500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerStarted","Data":"94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5"} Nov 25 18:16:55 crc kubenswrapper[4926]: I1125 18:16:55.026946 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2xrjj" podStartSLOduration=2.625327401 podStartE2EDuration="5.026929398s" podCreationTimestamp="2025-11-25 18:16:50 +0000 UTC" firstStartedPulling="2025-11-25 18:16:51.97300741 +0000 UTC m=+242.358521005" lastFinishedPulling="2025-11-25 18:16:54.374609397 +0000 UTC m=+244.760123002" observedRunningTime="2025-11-25 18:16:55.022338564 +0000 UTC m=+245.407852189" watchObservedRunningTime="2025-11-25 18:16:55.026929398 +0000 UTC m=+245.412443013" Nov 25 18:16:58 crc kubenswrapper[4926]: I1125 18:16:58.108633 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5j4h6" Nov 25 18:16:58 crc kubenswrapper[4926]: I1125 18:16:58.109596 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5j4h6" Nov 25 18:16:58 crc kubenswrapper[4926]: I1125 18:16:58.169706 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5j4h6" Nov 25 18:16:58 crc kubenswrapper[4926]: I1125 18:16:58.305262 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gxcdm" Nov 25 18:16:58 crc kubenswrapper[4926]: I1125 18:16:58.305318 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gxcdm" Nov 25 18:16:58 crc kubenswrapper[4926]: I1125 18:16:58.342222 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gxcdm" Nov 25 18:16:59 crc kubenswrapper[4926]: I1125 18:16:59.074440 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gxcdm" Nov 25 18:16:59 crc kubenswrapper[4926]: I1125 18:16:59.076865 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5j4h6" Nov 25 18:17:00 crc kubenswrapper[4926]: I1125 18:17:00.626518 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2xrjj" Nov 25 18:17:00 crc kubenswrapper[4926]: I1125 18:17:00.626569 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2xrjj" Nov 25 18:17:00 crc kubenswrapper[4926]: I1125 18:17:00.663968 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2xrjj" Nov 25 18:17:00 crc kubenswrapper[4926]: I1125 18:17:00.728628 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5xqw8" Nov 25 18:17:00 crc kubenswrapper[4926]: I1125 18:17:00.728699 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5xqw8" Nov 25 18:17:00 crc kubenswrapper[4926]: I1125 18:17:00.781399 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5xqw8" Nov 25 18:17:01 crc kubenswrapper[4926]: I1125 18:17:01.085730 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2xrjj" Nov 25 18:17:01 crc kubenswrapper[4926]: I1125 18:17:01.086195 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5xqw8" Nov 25 18:18:33 crc kubenswrapper[4926]: I1125 18:18:33.541185 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:18:33 crc kubenswrapper[4926]: I1125 18:18:33.541841 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:19:03 crc kubenswrapper[4926]: I1125 18:19:03.541510 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:19:03 crc kubenswrapper[4926]: I1125 18:19:03.542198 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:19:33 crc kubenswrapper[4926]: I1125 18:19:33.541448 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:19:33 crc kubenswrapper[4926]: I1125 18:19:33.542031 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:19:33 crc kubenswrapper[4926]: I1125 18:19:33.542101 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:19:33 crc kubenswrapper[4926]: I1125 18:19:33.542919 4926 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2605307c38a76c7ce73c047e48783a7fd79aada84e3b313e0f1c6214104df2a9"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:19:33 crc kubenswrapper[4926]: I1125 18:19:33.543018 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://2605307c38a76c7ce73c047e48783a7fd79aada84e3b313e0f1c6214104df2a9" gracePeriod=600 Nov 25 18:19:34 crc kubenswrapper[4926]: I1125 18:19:34.076756 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="2605307c38a76c7ce73c047e48783a7fd79aada84e3b313e0f1c6214104df2a9" exitCode=0 Nov 25 18:19:34 crc kubenswrapper[4926]: I1125 18:19:34.076835 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"2605307c38a76c7ce73c047e48783a7fd79aada84e3b313e0f1c6214104df2a9"} Nov 25 18:19:34 crc kubenswrapper[4926]: I1125 18:19:34.077304 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"4780239f9864310c55b02fee7ce2fe9b6cc7572aec239cace54a7899ca327d6e"} Nov 25 18:19:34 crc kubenswrapper[4926]: I1125 18:19:34.077336 4926 scope.go:117] "RemoveContainer" containerID="a41230f77e3ce011a0969fa030e6f213fba72983c117acace96c81cfa2518bb1" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.190184 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gmzsn"] Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.191622 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.246865 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gmzsn"] Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.382561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-registry-tls\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.382914 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a90bb080-0480-4a5c-9fee-93684738e0cf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.383040 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a90bb080-0480-4a5c-9fee-93684738e0cf-trusted-ca\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.383135 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xks72\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-kube-api-access-xks72\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.383234 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a90bb080-0480-4a5c-9fee-93684738e0cf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.383320 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a90bb080-0480-4a5c-9fee-93684738e0cf-registry-certificates\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.383430 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-bound-sa-token\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.383516 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.405816 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485512 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-registry-tls\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485564 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a90bb080-0480-4a5c-9fee-93684738e0cf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485591 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a90bb080-0480-4a5c-9fee-93684738e0cf-trusted-ca\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485614 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xks72\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-kube-api-access-xks72\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485655 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a90bb080-0480-4a5c-9fee-93684738e0cf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a90bb080-0480-4a5c-9fee-93684738e0cf-registry-certificates\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.485714 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-bound-sa-token\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.486033 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a90bb080-0480-4a5c-9fee-93684738e0cf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.487124 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a90bb080-0480-4a5c-9fee-93684738e0cf-trusted-ca\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.487193 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a90bb080-0480-4a5c-9fee-93684738e0cf-registry-certificates\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.491303 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-registry-tls\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.492039 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a90bb080-0480-4a5c-9fee-93684738e0cf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.502678 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-bound-sa-token\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.505913 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xks72\" (UniqueName: \"kubernetes.io/projected/a90bb080-0480-4a5c-9fee-93684738e0cf-kube-api-access-xks72\") pod \"image-registry-66df7c8f76-gmzsn\" (UID: \"a90bb080-0480-4a5c-9fee-93684738e0cf\") " pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.508303 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:50 crc kubenswrapper[4926]: I1125 18:19:50.710700 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gmzsn"] Nov 25 18:19:51 crc kubenswrapper[4926]: I1125 18:19:51.186296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" event={"ID":"a90bb080-0480-4a5c-9fee-93684738e0cf","Type":"ContainerStarted","Data":"a830671c5d4aea5cfca16dd8c27bb4817c87500e21a32efd2734a97c90ff96b8"} Nov 25 18:19:51 crc kubenswrapper[4926]: I1125 18:19:51.186552 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" event={"ID":"a90bb080-0480-4a5c-9fee-93684738e0cf","Type":"ContainerStarted","Data":"80cd692244a4786d9286ca944a6f276c77f4d6cfe82b0db021667d49d6bab9c5"} Nov 25 18:19:51 crc kubenswrapper[4926]: I1125 18:19:51.186756 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:19:51 crc kubenswrapper[4926]: I1125 18:19:51.209480 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" podStartSLOduration=1.209427255 podStartE2EDuration="1.209427255s" podCreationTimestamp="2025-11-25 18:19:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:19:51.205349296 +0000 UTC m=+421.590862921" watchObservedRunningTime="2025-11-25 18:19:51.209427255 +0000 UTC m=+421.594940860" Nov 25 18:20:10 crc kubenswrapper[4926]: I1125 18:20:10.514738 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" Nov 25 18:20:10 crc kubenswrapper[4926]: I1125 18:20:10.581320 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qkn8k"] Nov 25 18:20:35 crc kubenswrapper[4926]: I1125 18:20:35.624706 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" podUID="881e35fe-f917-461a-a1d6-804e58b5b740" containerName="registry" containerID="cri-o://36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151" gracePeriod=30 Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.044499 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.098296 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/881e35fe-f917-461a-a1d6-804e58b5b740-installation-pull-secrets\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.098425 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/881e35fe-f917-461a-a1d6-804e58b5b740-ca-trust-extracted\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.098461 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-registry-tls\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.098527 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-registry-certificates\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.099419 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.099851 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.099880 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-trusted-ca\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.099925 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9zbl\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-kube-api-access-k9zbl\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.100002 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-bound-sa-token\") pod \"881e35fe-f917-461a-a1d6-804e58b5b740\" (UID: \"881e35fe-f917-461a-a1d6-804e58b5b740\") " Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.100426 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.100899 4926 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.100943 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/881e35fe-f917-461a-a1d6-804e58b5b740-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.104571 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.104840 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.105351 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-kube-api-access-k9zbl" (OuterVolumeSpecName: "kube-api-access-k9zbl") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "kube-api-access-k9zbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.108417 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/881e35fe-f917-461a-a1d6-804e58b5b740-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.113323 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.119273 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/881e35fe-f917-461a-a1d6-804e58b5b740-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "881e35fe-f917-461a-a1d6-804e58b5b740" (UID: "881e35fe-f917-461a-a1d6-804e58b5b740"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.202868 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.202907 4926 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/881e35fe-f917-461a-a1d6-804e58b5b740-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.202918 4926 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/881e35fe-f917-461a-a1d6-804e58b5b740-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.202929 4926 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.202938 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9zbl\" (UniqueName: \"kubernetes.io/projected/881e35fe-f917-461a-a1d6-804e58b5b740-kube-api-access-k9zbl\") on node \"crc\" DevicePath \"\"" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.455787 4926 generic.go:334] "Generic (PLEG): container finished" podID="881e35fe-f917-461a-a1d6-804e58b5b740" containerID="36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151" exitCode=0 Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.455875 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" event={"ID":"881e35fe-f917-461a-a1d6-804e58b5b740","Type":"ContainerDied","Data":"36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151"} Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.456023 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.456244 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qkn8k" event={"ID":"881e35fe-f917-461a-a1d6-804e58b5b740","Type":"ContainerDied","Data":"e131c0a16cd35e21b4e8a4df33f3b8ab03ed857b4ae6e7c48c0701aacb5e6be4"} Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.456317 4926 scope.go:117] "RemoveContainer" containerID="36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.473407 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qkn8k"] Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.483009 4926 scope.go:117] "RemoveContainer" containerID="36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.483060 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qkn8k"] Nov 25 18:20:36 crc kubenswrapper[4926]: E1125 18:20:36.484212 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151\": container with ID starting with 36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151 not found: ID does not exist" containerID="36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151" Nov 25 18:20:36 crc kubenswrapper[4926]: I1125 18:20:36.484274 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151"} err="failed to get container status \"36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151\": rpc error: code = NotFound desc = could not find container \"36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151\": container with ID starting with 36666a08ddd935de5a88e71b5e7b305bbdd8cb188a40e92777d4cd4a0b988151 not found: ID does not exist" Nov 25 18:20:38 crc kubenswrapper[4926]: I1125 18:20:38.344558 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="881e35fe-f917-461a-a1d6-804e58b5b740" path="/var/lib/kubelet/pods/881e35fe-f917-461a-a1d6-804e58b5b740/volumes" Nov 25 18:22:03 crc kubenswrapper[4926]: I1125 18:22:03.541006 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:22:03 crc kubenswrapper[4926]: I1125 18:22:03.541799 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.534534 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fzlrf"] Nov 25 18:22:28 crc kubenswrapper[4926]: E1125 18:22:28.535822 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="881e35fe-f917-461a-a1d6-804e58b5b740" 
containerName="registry" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.535843 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="881e35fe-f917-461a-a1d6-804e58b5b740" containerName="registry" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.535964 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="881e35fe-f917-461a-a1d6-804e58b5b740" containerName="registry" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.536399 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.538825 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-hxjds" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.539306 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.539352 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.548447 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fzlrf"] Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.569884 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-x8r5t"] Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.570702 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.592935 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wcjpv" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.602467 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-qhdk2"] Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.605313 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.608881 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-5db2z" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.609891 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ms75\" (UniqueName: \"kubernetes.io/projected/fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce-kube-api-access-8ms75\") pod \"cert-manager-webhook-5655c58dd6-qhdk2\" (UID: \"fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.609935 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp58h\" (UniqueName: \"kubernetes.io/projected/e5a12f1c-5ece-4ecc-a24b-6570902b3f18-kube-api-access-cp58h\") pod \"cert-manager-cainjector-7f985d654d-fzlrf\" (UID: \"e5a12f1c-5ece-4ecc-a24b-6570902b3f18\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.609976 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff48k\" (UniqueName: \"kubernetes.io/projected/5fd3c793-dd9b-475c-b527-51c52d21e018-kube-api-access-ff48k\") pod \"cert-manager-5b446d88c5-x8r5t\" (UID: \"5fd3c793-dd9b-475c-b527-51c52d21e018\") " pod="cert-manager/cert-manager-5b446d88c5-x8r5t" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.623626 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-x8r5t"] Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.626485 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-qhdk2"] Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.711962 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ms75\" (UniqueName: \"kubernetes.io/projected/fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce-kube-api-access-8ms75\") pod \"cert-manager-webhook-5655c58dd6-qhdk2\" (UID: \"fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.712034 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp58h\" (UniqueName: \"kubernetes.io/projected/e5a12f1c-5ece-4ecc-a24b-6570902b3f18-kube-api-access-cp58h\") pod \"cert-manager-cainjector-7f985d654d-fzlrf\" (UID: \"e5a12f1c-5ece-4ecc-a24b-6570902b3f18\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.712147 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff48k\" (UniqueName: \"kubernetes.io/projected/5fd3c793-dd9b-475c-b527-51c52d21e018-kube-api-access-ff48k\") pod \"cert-manager-5b446d88c5-x8r5t\" (UID: \"5fd3c793-dd9b-475c-b527-51c52d21e018\") " pod="cert-manager/cert-manager-5b446d88c5-x8r5t" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.734181 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ms75\" (UniqueName: \"kubernetes.io/projected/fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce-kube-api-access-8ms75\") pod \"cert-manager-webhook-5655c58dd6-qhdk2\" (UID: \"fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce\") " 
pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.736042 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff48k\" (UniqueName: \"kubernetes.io/projected/5fd3c793-dd9b-475c-b527-51c52d21e018-kube-api-access-ff48k\") pod \"cert-manager-5b446d88c5-x8r5t\" (UID: \"5fd3c793-dd9b-475c-b527-51c52d21e018\") " pod="cert-manager/cert-manager-5b446d88c5-x8r5t" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.743192 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp58h\" (UniqueName: \"kubernetes.io/projected/e5a12f1c-5ece-4ecc-a24b-6570902b3f18-kube-api-access-cp58h\") pod \"cert-manager-cainjector-7f985d654d-fzlrf\" (UID: \"e5a12f1c-5ece-4ecc-a24b-6570902b3f18\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.860424 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.905416 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" Nov 25 18:22:28 crc kubenswrapper[4926]: I1125 18:22:28.931932 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" Nov 25 18:22:29 crc kubenswrapper[4926]: I1125 18:22:29.166717 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-x8r5t"] Nov 25 18:22:29 crc kubenswrapper[4926]: I1125 18:22:29.176937 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:22:29 crc kubenswrapper[4926]: I1125 18:22:29.212524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" event={"ID":"5fd3c793-dd9b-475c-b527-51c52d21e018","Type":"ContainerStarted","Data":"bd0b51cc944f41965e34c0af5c42c239eb2bcaf4fb52954b4c4862a41b4bf782"} Nov 25 18:22:29 crc kubenswrapper[4926]: I1125 18:22:29.222428 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-qhdk2"] Nov 25 18:22:29 crc kubenswrapper[4926]: W1125 18:22:29.224762 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc0b92d7_9c5e_4cf4_b31e_19bb7c0e38ce.slice/crio-7e578fcb1c38c9dc05beb63bfa32cf481fead0b2f2d37265fd856e3ca3de6440 WatchSource:0}: Error finding container 7e578fcb1c38c9dc05beb63bfa32cf481fead0b2f2d37265fd856e3ca3de6440: Status 404 returned error can't find the container with id 7e578fcb1c38c9dc05beb63bfa32cf481fead0b2f2d37265fd856e3ca3de6440 Nov 25 18:22:29 crc kubenswrapper[4926]: I1125 18:22:29.320993 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fzlrf"] Nov 25 18:22:30 crc kubenswrapper[4926]: I1125 18:22:30.224154 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" event={"ID":"e5a12f1c-5ece-4ecc-a24b-6570902b3f18","Type":"ContainerStarted","Data":"928723574a72ccfc8c23f273f62f6807646fde416a0ede75cbdcc6368f34bd9c"} Nov 25 18:22:30 crc kubenswrapper[4926]: I1125 18:22:30.232574 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" 
event={"ID":"fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce","Type":"ContainerStarted","Data":"7e578fcb1c38c9dc05beb63bfa32cf481fead0b2f2d37265fd856e3ca3de6440"}
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.261386 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" event={"ID":"5fd3c793-dd9b-475c-b527-51c52d21e018","Type":"ContainerStarted","Data":"1498ffdd9479e34b9c15388cba295e1484ab01daa8106521ded067f18eef006a"}
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.263996 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" event={"ID":"e5a12f1c-5ece-4ecc-a24b-6570902b3f18","Type":"ContainerStarted","Data":"d9aed9a9fe37264ff99bfb2c79aaf784be38aea56b1283fa0711009bc340e398"}
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.269235 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" event={"ID":"fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce","Type":"ContainerStarted","Data":"de06fba8faa0da0b514a2fe02c25c4e5d6194e19504a4cc0e9ca8c9b8bc1a77d"}
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.269322 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2"
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.281732 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" podStartSLOduration=2.121484777 podStartE2EDuration="5.281686928s" podCreationTimestamp="2025-11-25 18:22:28 +0000 UTC" firstStartedPulling="2025-11-25 18:22:29.176724409 +0000 UTC m=+579.562238014" lastFinishedPulling="2025-11-25 18:22:32.33692652 +0000 UTC m=+582.722440165" observedRunningTime="2025-11-25 18:22:33.277719817 +0000 UTC m=+583.663233432" watchObservedRunningTime="2025-11-25 18:22:33.281686928 +0000 UTC m=+583.667200533"
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.300182 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-fzlrf" podStartSLOduration=1.619709989 podStartE2EDuration="5.300150918s" podCreationTimestamp="2025-11-25 18:22:28 +0000 UTC" firstStartedPulling="2025-11-25 18:22:29.338003713 +0000 UTC m=+579.723517318" lastFinishedPulling="2025-11-25 18:22:33.018444622 +0000 UTC m=+583.403958247" observedRunningTime="2025-11-25 18:22:33.298269846 +0000 UTC m=+583.683783451" watchObservedRunningTime="2025-11-25 18:22:33.300150918 +0000 UTC m=+583.685664563"
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.324657 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2" podStartSLOduration=2.229913867 podStartE2EDuration="5.324628516s" podCreationTimestamp="2025-11-25 18:22:28 +0000 UTC" firstStartedPulling="2025-11-25 18:22:29.227594767 +0000 UTC m=+579.613108362" lastFinishedPulling="2025-11-25 18:22:32.322309406 +0000 UTC m=+582.707823011" observedRunningTime="2025-11-25 18:22:33.3226192 +0000 UTC m=+583.708132805" watchObservedRunningTime="2025-11-25 18:22:33.324628516 +0000 UTC m=+583.710142171"
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.541154 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:22:33 crc kubenswrapper[4926]: I1125 18:22:33.541278 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.344286 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zrwvb"]
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.345170 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-controller" containerID="cri-o://48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.345799 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="sbdb" containerID="cri-o://29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.345849 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="nbdb" containerID="cri-o://c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.345890 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="northd" containerID="cri-o://bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.345927 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.345967 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-node" containerID="cri-o://19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.346002 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-acl-logging" containerID="cri-o://54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.391474 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller" containerID="cri-o://0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" gracePeriod=30
Nov 25 18:22:38 crc kubenswrapper[4926]: I1125 18:22:38.935571 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-qhdk2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.045794 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/3.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.048367 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovn-acl-logging/0.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.048918 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovn-controller/0.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.049453 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.110552 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5hsj2"]
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.110905 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.110947 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.110958 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-ovn-metrics"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.110967 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-ovn-metrics"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.110987 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kubecfg-setup"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111016 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kubecfg-setup"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111027 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-acl-logging"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111036 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-acl-logging"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111050 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111064 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111106 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="nbdb"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111115 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="nbdb"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111127 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111135 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111146 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="northd"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111155 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="northd"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111194 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-node"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111202 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-node"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111211 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111219 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111231 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="sbdb"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111260 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="sbdb"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111468 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111516 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovn-acl-logging"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111531 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="northd"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111544 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111555 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111566 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-ovn-metrics"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111600 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="kube-rbac-proxy-node"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111624 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111633 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="sbdb"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111642 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="nbdb"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111802 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111836 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.111856 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.111865 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.112030 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.112049 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62905073-17d2-4b78-9921-02a343480b34" containerName="ovnkube-controller"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.115976 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169474 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/62905073-17d2-4b78-9921-02a343480b34-ovn-node-metrics-cert\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169599 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-bin\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169669 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-etc-openvswitch\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169696 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-ovn\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169721 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-openvswitch\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-var-lib-cni-networks-ovn-kubernetes\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169798 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-netns\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169789 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169838 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169789 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169805 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169843 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-systemd\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169878 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169856 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.169950 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-slash\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170008 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-kubelet\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170031 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-slash" (OuterVolumeSpecName: "host-slash") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170038 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-netd\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170064 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170090 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-config\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170122 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-log-socket\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170154 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-env-overrides\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170179 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-ovn-kubernetes\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170094 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170182 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-log-socket" (OuterVolumeSpecName: "log-socket") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170207 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-script-lib\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170252 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-var-lib-openvswitch\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170261 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170289 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-systemd-units\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170303 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170321 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-node-log\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170332 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170365 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmzlv\" (UniqueName: \"kubernetes.io/projected/62905073-17d2-4b78-9921-02a343480b34-kube-api-access-xmzlv\") pod \"62905073-17d2-4b78-9921-02a343480b34\" (UID: \"62905073-17d2-4b78-9921-02a343480b34\") "
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170412 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-node-log" (OuterVolumeSpecName: "node-log") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170603 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170668 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170680 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170975 4926 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-slash\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.170992 4926 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171004 4926 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171013 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171022 4926 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-log-socket\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171031 4926 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171041 4926 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171050 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/62905073-17d2-4b78-9921-02a343480b34-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171059 4926 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171068 4926 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171076 4926 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-node-log\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171083 4926 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171094 4926 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171103 4926 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171113 4926 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171121 4926 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.171130 4926 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.177472 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62905073-17d2-4b78-9921-02a343480b34-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.177930 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62905073-17d2-4b78-9921-02a343480b34-kube-api-access-xmzlv" (OuterVolumeSpecName: "kube-api-access-xmzlv") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "kube-api-access-xmzlv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.186318 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "62905073-17d2-4b78-9921-02a343480b34" (UID: "62905073-17d2-4b78-9921-02a343480b34"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.272502 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.272583 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-cni-netd\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.272735 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-ovnkube-config\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273027 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-kubelet\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273212 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273320 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-cni-bin\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273459 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-node-log\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273618 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-log-socket\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273695 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-etc-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273748 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl7x7\" (UniqueName: \"kubernetes.io/projected/92b83f42-5c38-431f-9471-180c5aef549d-kube-api-access-wl7x7\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.273928 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-ovnkube-script-lib\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274040 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/92b83f42-5c38-431f-9471-180c5aef549d-ovn-node-metrics-cert\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274104 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-systemd-units\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274140 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-ovn\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274192 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-env-overrides\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274211 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-systemd\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274242 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-run-ovn-kubernetes\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274282 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-run-netns\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-slash\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274325 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-var-lib-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274372 4926 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/62905073-17d2-4b78-9921-02a343480b34-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274410 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmzlv\" (UniqueName: \"kubernetes.io/projected/62905073-17d2-4b78-9921-02a343480b34-kube-api-access-xmzlv\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.274424 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/62905073-17d2-4b78-9921-02a343480b34-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.309567 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovnkube-controller/3.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.314060 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovn-acl-logging/0.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.314745 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zrwvb_62905073-17d2-4b78-9921-02a343480b34/ovn-controller/0.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315284 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" exitCode=0
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315329 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" exitCode=0
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315353 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" exitCode=0
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315395 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315369 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" exitCode=0
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315433 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" exitCode=0
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315449 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" exitCode=0
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315466 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" exitCode=143
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315485 4926 generic.go:334] "Generic (PLEG): container finished" podID="62905073-17d2-4b78-9921-02a343480b34" containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" exitCode=143
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315406 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315593 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315606 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315623 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315627 4926 scope.go:117] "RemoveContainer" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315674 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315688 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315695 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315700 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315706 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315711 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315717 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315723 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315728 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315736 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315745 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315751 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315757 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315763 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315769 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315774 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315779 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315784 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315789 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315794 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315803 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315811 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315817 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315823 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315829 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315835 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315841 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315847 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315853 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315858 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315863 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315870 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zrwvb" event={"ID":"62905073-17d2-4b78-9921-02a343480b34","Type":"ContainerDied","Data":"087fa0f491efee00babbadfed1073fc9e87aa407fb06bc92bf83bb52c6619ca7"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315879 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315886 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315891 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315896 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315902 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315907 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315912 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315919 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315924 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.315929 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.317514 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/2.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.318163 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/1.log"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.318236 4926 generic.go:334] "Generic (PLEG): container finished" podID="78af77fa-0071-48e9-8b78-bdd92abfb013" containerID="578dba399f1b6a1dc334859cac5b006c3b3927f1a980d23e434bc3236dd17e01" exitCode=2
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.318290 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerDied","Data":"578dba399f1b6a1dc334859cac5b006c3b3927f1a980d23e434bc3236dd17e01"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.318334 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd"}
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.319319 4926 scope.go:117] "RemoveContainer" containerID="578dba399f1b6a1dc334859cac5b006c3b3927f1a980d23e434bc3236dd17e01"
Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.319728 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-49qhh_openshift-multus(78af77fa-0071-48e9-8b78-bdd92abfb013)\"" pod="openshift-multus/multus-49qhh" podUID="78af77fa-0071-48e9-8b78-bdd92abfb013"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.343668 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.377455 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-ovnkube-config\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.377620 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-kubelet\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.377713 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.377794 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-cni-bin\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.377868 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-node-log\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.377943 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-log-socket\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378012 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-etc-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl7x7\" (UniqueName: \"kubernetes.io/projected/92b83f42-5c38-431f-9471-180c5aef549d-kube-api-access-wl7x7\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378129 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-ovnkube-script-lib\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378197 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/92b83f42-5c38-431f-9471-180c5aef549d-ovn-node-metrics-cert\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378269 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-systemd-units\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378772 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zrwvb"]
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378946 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-log-socket\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.379049 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-etc-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.379059 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-cni-bin\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378984 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.379251 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-systemd-units\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.379307 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-node-log\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.379314 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-kubelet\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.380198 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-ovnkube-config\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.380204 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-ovnkube-script-lib\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.381066 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-ovn\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2"
Nov 25 18:22:39 crc
kubenswrapper[4926]: I1125 18:22:39.384118 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/92b83f42-5c38-431f-9471-180c5aef549d-ovn-node-metrics-cert\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.384662 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zrwvb"] Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.388079 4926 scope.go:117] "RemoveContainer" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.378334 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-ovn\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.390707 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-env-overrides\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.390796 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-systemd\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.390852 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-run-ovn-kubernetes\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.390907 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-run-netns\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.390960 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-slash\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391000 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-var-lib-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391002 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" 
(UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-systemd\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391061 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391080 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-slash\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391113 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-run-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391003 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-run-ovn-kubernetes\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391128 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-var-lib-openvswitch\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391122 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-run-netns\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391347 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-cni-netd\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391600 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/92b83f42-5c38-431f-9471-180c5aef549d-host-cni-netd\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.391752 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/92b83f42-5c38-431f-9471-180c5aef549d-env-overrides\") pod \"ovnkube-node-5hsj2\" (UID: 
\"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.407022 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl7x7\" (UniqueName: \"kubernetes.io/projected/92b83f42-5c38-431f-9471-180c5aef549d-kube-api-access-wl7x7\") pod \"ovnkube-node-5hsj2\" (UID: \"92b83f42-5c38-431f-9471-180c5aef549d\") " pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.410036 4926 scope.go:117] "RemoveContainer" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.425141 4926 scope.go:117] "RemoveContainer" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.433514 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.439542 4926 scope.go:117] "RemoveContainer" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.456315 4926 scope.go:117] "RemoveContainer" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.534337 4926 scope.go:117] "RemoveContainer" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.578717 4926 scope.go:117] "RemoveContainer" containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.598164 4926 scope.go:117] "RemoveContainer" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.613948 4926 scope.go:117] "RemoveContainer" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.614550 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": container with ID starting with 0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226 not found: ID does not exist" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.614600 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"} err="failed to get container status \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": rpc error: code = NotFound desc = could not find container \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": container with ID starting with 0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.614631 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.615207 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": container with ID starting with 7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f not found: ID does not exist" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.615272 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"} err="failed to get container status \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": rpc error: code = NotFound desc = could not find container \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": container with ID starting with 7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.615305 4926 scope.go:117] "RemoveContainer" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.615748 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": container with ID starting with 29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97 not found: ID does not exist" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.615775 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"} err="failed to get container status \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": rpc error: code = NotFound desc = could not find container \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": container with ID starting with 29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.615794 4926 scope.go:117] "RemoveContainer" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.616241 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": container with ID starting with c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9 not found: ID does not exist" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.616271 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"} err="failed to get container status \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": rpc error: code = NotFound desc = could not find container \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": container with ID starting with c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.616286 4926 scope.go:117] "RemoveContainer" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" Nov 25 18:22:39 crc 
kubenswrapper[4926]: E1125 18:22:39.616827 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": container with ID starting with bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0 not found: ID does not exist" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.616859 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"} err="failed to get container status \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": rpc error: code = NotFound desc = could not find container \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": container with ID starting with bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.616905 4926 scope.go:117] "RemoveContainer" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.617275 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": container with ID starting with 9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e not found: ID does not exist" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.617303 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"} err="failed to get container status \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": rpc error: code = NotFound desc = could not find container \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": container with ID starting with 9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.617320 4926 scope.go:117] "RemoveContainer" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.617654 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": container with ID starting with 19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8 not found: ID does not exist" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.617676 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"} err="failed to get container status \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": rpc error: code = NotFound desc = could not find container \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": container with ID starting with 19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: 
I1125 18:22:39.617690 4926 scope.go:117] "RemoveContainer" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.617980 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": container with ID starting with 54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83 not found: ID does not exist" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.618004 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"} err="failed to get container status \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": rpc error: code = NotFound desc = could not find container \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": container with ID starting with 54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.618024 4926 scope.go:117] "RemoveContainer" containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.618412 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": container with ID starting with 48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244 not found: ID does not exist" containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.618433 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"} err="failed to get container status \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": rpc error: code = NotFound desc = could not find container \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": container with ID starting with 48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.618446 4926 scope.go:117] "RemoveContainer" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" Nov 25 18:22:39 crc kubenswrapper[4926]: E1125 18:22:39.618956 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": container with ID starting with 70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210 not found: ID does not exist" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.618991 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"} err="failed to get container status \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": rpc error: code = NotFound desc = could not find container \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": container 
with ID starting with 70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.619011 4926 scope.go:117] "RemoveContainer" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.619340 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"} err="failed to get container status \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": rpc error: code = NotFound desc = could not find container \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": container with ID starting with 0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.619367 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.619881 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"} err="failed to get container status \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": rpc error: code = NotFound desc = could not find container \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": container with ID starting with 7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.619906 4926 scope.go:117] "RemoveContainer" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.620149 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"} err="failed to get container status \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": rpc error: code = NotFound desc = could not find container \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": container with ID starting with 29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.620171 4926 scope.go:117] "RemoveContainer" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.620417 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"} err="failed to get container status \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": rpc error: code = NotFound desc = could not find container \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": container with ID starting with c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.620438 4926 scope.go:117] "RemoveContainer" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.621016 4926 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"} err="failed to get container status \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": rpc error: code = NotFound desc = could not find container \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": container with ID starting with bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.621040 4926 scope.go:117] "RemoveContainer" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.621302 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"} err="failed to get container status \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": rpc error: code = NotFound desc = could not find container \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": container with ID starting with 9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.621323 4926 scope.go:117] "RemoveContainer" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.621579 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"} err="failed to get container status \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": rpc error: code = NotFound desc = could not find container \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": container with ID starting with 19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.621601 4926 scope.go:117] "RemoveContainer" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.622212 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"} err="failed to get container status \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": rpc error: code = NotFound desc = could not find container \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": container with ID starting with 54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.622236 4926 scope.go:117] "RemoveContainer" containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.622599 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"} err="failed to get container status \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": rpc error: code = NotFound desc = could not find container \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": container with ID starting with 48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244 not found: ID does not exist" Nov 
25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.622658 4926 scope.go:117] "RemoveContainer" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.623239 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"} err="failed to get container status \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": rpc error: code = NotFound desc = could not find container \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": container with ID starting with 70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.623267 4926 scope.go:117] "RemoveContainer" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.623695 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"} err="failed to get container status \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": rpc error: code = NotFound desc = could not find container \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": container with ID starting with 0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.623738 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.624085 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"} err="failed to get container status \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": rpc error: code = NotFound desc = could not find container \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": container with ID starting with 7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.624130 4926 scope.go:117] "RemoveContainer" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.624610 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"} err="failed to get container status \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": rpc error: code = NotFound desc = could not find container \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": container with ID starting with 29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.624642 4926 scope.go:117] "RemoveContainer" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.625223 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"} err="failed to get container status 
\"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": rpc error: code = NotFound desc = could not find container \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": container with ID starting with c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.625247 4926 scope.go:117] "RemoveContainer" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.626130 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"} err="failed to get container status \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": rpc error: code = NotFound desc = could not find container \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": container with ID starting with bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.626157 4926 scope.go:117] "RemoveContainer" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.626643 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"} err="failed to get container status \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": rpc error: code = NotFound desc = could not find container \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": container with ID starting with 9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.626673 4926 scope.go:117] "RemoveContainer" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.627129 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"} err="failed to get container status \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": rpc error: code = NotFound desc = could not find container \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": container with ID starting with 19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.627155 4926 scope.go:117] "RemoveContainer" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.627698 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"} err="failed to get container status \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": rpc error: code = NotFound desc = could not find container \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": container with ID starting with 54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.627791 4926 scope.go:117] "RemoveContainer" 
containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.628217 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"} err="failed to get container status \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": rpc error: code = NotFound desc = could not find container \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": container with ID starting with 48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.629065 4926 scope.go:117] "RemoveContainer" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.629450 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"} err="failed to get container status \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": rpc error: code = NotFound desc = could not find container \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": container with ID starting with 70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.629488 4926 scope.go:117] "RemoveContainer" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.629860 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"} err="failed to get container status \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": rpc error: code = NotFound desc = could not find container \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": container with ID starting with 0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.629981 4926 scope.go:117] "RemoveContainer" containerID="7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.630306 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f"} err="failed to get container status \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": rpc error: code = NotFound desc = could not find container \"7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f\": container with ID starting with 7d39e991784ad072e96cc69141eaa482dc38a182ceb88891595b61eb68158b1f not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.630417 4926 scope.go:117] "RemoveContainer" containerID="29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.630879 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97"} err="failed to get container status \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": rpc error: code = NotFound desc = could not find 
container \"29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97\": container with ID starting with 29e6409cd931fd4eb3337899e7626de89525dc725b35f9b49e139c13c054db97 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.630980 4926 scope.go:117] "RemoveContainer" containerID="c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.631425 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9"} err="failed to get container status \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": rpc error: code = NotFound desc = could not find container \"c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9\": container with ID starting with c84b898cf23483fe58b668697374ef7f900d75ad5f94c2c46eabae71b83c41b9 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.631520 4926 scope.go:117] "RemoveContainer" containerID="bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.631914 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0"} err="failed to get container status \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": rpc error: code = NotFound desc = could not find container \"bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0\": container with ID starting with bb6b29dec77cd788c7c41589269dc3dfbc3b4523cc359aea1f11bcdc374b5dd0 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.631941 4926 scope.go:117] "RemoveContainer" containerID="9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.632238 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e"} err="failed to get container status \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": rpc error: code = NotFound desc = could not find container \"9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e\": container with ID starting with 9f6bee47650799db0952f084714c8b38aff9672dab6395c78b552e3c3b88647e not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.632336 4926 scope.go:117] "RemoveContainer" containerID="19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.632800 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8"} err="failed to get container status \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": rpc error: code = NotFound desc = could not find container \"19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8\": container with ID starting with 19f4bdd4a168063ad4b7962e0914bbf56607e87cb4613c0e179f8c4b25b37bf8 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.632827 4926 scope.go:117] "RemoveContainer" containerID="54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.633158 4926 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83"} err="failed to get container status \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": rpc error: code = NotFound desc = could not find container \"54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83\": container with ID starting with 54b4b9954a0abfaed22bf9cc3c64cb1bc47093b4efa3da3c294d3468d9927e83 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.633206 4926 scope.go:117] "RemoveContainer" containerID="48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.633547 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244"} err="failed to get container status \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": rpc error: code = NotFound desc = could not find container \"48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244\": container with ID starting with 48fe6782612327f764a071c3f5ae964bbcbb5366e845c20a4ea75d23bc6cb244 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.633646 4926 scope.go:117] "RemoveContainer" containerID="70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.633977 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210"} err="failed to get container status \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": rpc error: code = NotFound desc = could not find container \"70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210\": container with ID starting with 70a379958f8dea33719194be21332a50ef4956446a07214efdcb0f8b5fd55210 not found: ID does not exist" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.634001 4926 scope.go:117] "RemoveContainer" containerID="0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226" Nov 25 18:22:39 crc kubenswrapper[4926]: I1125 18:22:39.634306 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226"} err="failed to get container status \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": rpc error: code = NotFound desc = could not find container \"0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226\": container with ID starting with 0d50b783aa08f3a06989c68e355dfb508d1cbadb34037c2f7a97ccf36f3d9226 not found: ID does not exist" Nov 25 18:22:40 crc kubenswrapper[4926]: I1125 18:22:40.330167 4926 generic.go:334] "Generic (PLEG): container finished" podID="92b83f42-5c38-431f-9471-180c5aef549d" containerID="b48efd00b8b208201c5b68b10f3748ad1a48a439b90ee6953cdb82bd69aecf49" exitCode=0 Nov 25 18:22:40 crc kubenswrapper[4926]: I1125 18:22:40.349802 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62905073-17d2-4b78-9921-02a343480b34" path="/var/lib/kubelet/pods/62905073-17d2-4b78-9921-02a343480b34/volumes" Nov 25 18:22:40 crc kubenswrapper[4926]: I1125 18:22:40.351340 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" 
event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerDied","Data":"b48efd00b8b208201c5b68b10f3748ad1a48a439b90ee6953cdb82bd69aecf49"} Nov 25 18:22:40 crc kubenswrapper[4926]: I1125 18:22:40.351409 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"c45197ac70078741e8085ccb91924367f05fd856280786da52debb2438e527fb"} Nov 25 18:22:41 crc kubenswrapper[4926]: I1125 18:22:41.342315 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"c6afb19d59f39cceb71a891a8dcc84200ac29cae8fa106d18be71b278b2032dc"} Nov 25 18:22:41 crc kubenswrapper[4926]: I1125 18:22:41.342867 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"c77bd20fb3b1bc4a65a00bf17d0e9c0abef3f96cb7fa8a54d38a8ea1a0794a7a"} Nov 25 18:22:41 crc kubenswrapper[4926]: I1125 18:22:41.342885 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"519388e06499b042de4a0de8afd345cf4c3f1caaa8e097fe66488d5a151102d3"} Nov 25 18:22:42 crc kubenswrapper[4926]: I1125 18:22:42.355009 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"f241a7e0359c25c2766af3d9142ac563ba5a1b8a456b99fa613ce4472fa8dc58"} Nov 25 18:22:42 crc kubenswrapper[4926]: I1125 18:22:42.355579 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"8d8ef182fe0ca6ddfd2d243ff0fef620f3efb2019fa6bbbe9e09aea3c3de14fa"} Nov 25 18:22:42 crc kubenswrapper[4926]: I1125 18:22:42.355608 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"f8578bc8ecf515e3f10db9d8558b65b1cdb11bffd9569fe3de00a8c004b09a01"} Nov 25 18:22:44 crc kubenswrapper[4926]: I1125 18:22:44.392478 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"497d1b159c53586523cc2fca6770878a158658dc9c6e40f315c41607485ab345"} Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.419014 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" event={"ID":"92b83f42-5c38-431f-9471-180c5aef549d","Type":"ContainerStarted","Data":"890b243fd3552294b2842a01c48b1cf2c602d20e4874a481039b50bbd578b0df"} Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.419655 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.419682 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.419693 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.468803 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" podStartSLOduration=8.468784168 podStartE2EDuration="8.468784168s" podCreationTimestamp="2025-11-25 18:22:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:22:47.466514473 +0000 UTC m=+597.852028088" watchObservedRunningTime="2025-11-25 18:22:47.468784168 +0000 UTC m=+597.854297773" Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.485497 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:47 crc kubenswrapper[4926]: I1125 18:22:47.487592 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:22:50 crc kubenswrapper[4926]: I1125 18:22:50.576014 4926 scope.go:117] "RemoveContainer" containerID="c61e62dde5ef07eaf78e7f5d9dc15976d511db31771f551a45af597063e742bd" Nov 25 18:22:51 crc kubenswrapper[4926]: I1125 18:22:51.329348 4926 scope.go:117] "RemoveContainer" containerID="578dba399f1b6a1dc334859cac5b006c3b3927f1a980d23e434bc3236dd17e01" Nov 25 18:22:51 crc kubenswrapper[4926]: E1125 18:22:51.330123 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-49qhh_openshift-multus(78af77fa-0071-48e9-8b78-bdd92abfb013)\"" pod="openshift-multus/multus-49qhh" podUID="78af77fa-0071-48e9-8b78-bdd92abfb013" Nov 25 18:22:51 crc kubenswrapper[4926]: I1125 18:22:51.449688 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/2.log" Nov 25 18:23:03 crc kubenswrapper[4926]: I1125 18:23:03.541717 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:23:03 crc kubenswrapper[4926]: I1125 18:23:03.542674 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:23:03 crc kubenswrapper[4926]: I1125 18:23:03.542747 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:23:03 crc kubenswrapper[4926]: I1125 18:23:03.544452 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4780239f9864310c55b02fee7ce2fe9b6cc7572aec239cace54a7899ca327d6e"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:23:03 crc kubenswrapper[4926]: I1125 18:23:03.544708 4926 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://4780239f9864310c55b02fee7ce2fe9b6cc7572aec239cace54a7899ca327d6e" gracePeriod=600 Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.329304 4926 scope.go:117] "RemoveContainer" containerID="578dba399f1b6a1dc334859cac5b006c3b3927f1a980d23e434bc3236dd17e01" Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.559860 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="4780239f9864310c55b02fee7ce2fe9b6cc7572aec239cace54a7899ca327d6e" exitCode=0 Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.559952 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"4780239f9864310c55b02fee7ce2fe9b6cc7572aec239cace54a7899ca327d6e"} Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.559995 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"fe9604ac0593158a6b911432198090e6a5ba75c5f094643a6db976009ac5d9c3"} Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.560028 4926 scope.go:117] "RemoveContainer" containerID="2605307c38a76c7ce73c047e48783a7fd79aada84e3b313e0f1c6214104df2a9" Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.565259 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-49qhh_78af77fa-0071-48e9-8b78-bdd92abfb013/kube-multus/2.log" Nov 25 18:23:04 crc kubenswrapper[4926]: I1125 18:23:04.565316 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-49qhh" event={"ID":"78af77fa-0071-48e9-8b78-bdd92abfb013","Type":"ContainerStarted","Data":"c12e1fb86618d9bf771a158276ca835386a222ebe0cc469e62f1017e02f563b8"} Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.483491 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5hsj2" Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.812972 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d"] Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.814521 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.817194 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.823272 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d"] Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.931108 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.931176 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:09 crc kubenswrapper[4926]: I1125 18:23:09.931221 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjlrv\" (UniqueName: \"kubernetes.io/projected/c148da96-30bb-41ad-8d86-1a3c60450fd7-kube-api-access-pjlrv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.032303 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.032418 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.032466 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjlrv\" (UniqueName: \"kubernetes.io/projected/c148da96-30bb-41ad-8d86-1a3c60450fd7-kube-api-access-pjlrv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.033497 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.033547 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.058047 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjlrv\" (UniqueName: \"kubernetes.io/projected/c148da96-30bb-41ad-8d86-1a3c60450fd7-kube-api-access-pjlrv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.137540 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:10 crc kubenswrapper[4926]: I1125 18:23:10.621533 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d"] Nov 25 18:23:11 crc kubenswrapper[4926]: I1125 18:23:11.616586 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" event={"ID":"c148da96-30bb-41ad-8d86-1a3c60450fd7","Type":"ContainerStarted","Data":"ad887cb4394ecd1deacc26ed0599a1fd0b4e78ad33666b65865d24148fc69a73"} Nov 25 18:23:11 crc kubenswrapper[4926]: I1125 18:23:11.617130 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" event={"ID":"c148da96-30bb-41ad-8d86-1a3c60450fd7","Type":"ContainerStarted","Data":"6980562fa0664b0816e685d68ee5b0529802886f79ce0efc91dc6c4763a56f1f"} Nov 25 18:23:12 crc kubenswrapper[4926]: I1125 18:23:12.627217 4926 generic.go:334] "Generic (PLEG): container finished" podID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerID="ad887cb4394ecd1deacc26ed0599a1fd0b4e78ad33666b65865d24148fc69a73" exitCode=0 Nov 25 18:23:12 crc kubenswrapper[4926]: I1125 18:23:12.627355 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" event={"ID":"c148da96-30bb-41ad-8d86-1a3c60450fd7","Type":"ContainerDied","Data":"ad887cb4394ecd1deacc26ed0599a1fd0b4e78ad33666b65865d24148fc69a73"} Nov 25 18:23:14 crc kubenswrapper[4926]: I1125 18:23:14.650729 4926 generic.go:334] "Generic (PLEG): container finished" podID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerID="bfb666443a6c803b05c45b96a777ea70ef68a105ef6cd3ebf75f5023510a9b48" exitCode=0 Nov 25 18:23:14 crc kubenswrapper[4926]: I1125 18:23:14.650902 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" 
event={"ID":"c148da96-30bb-41ad-8d86-1a3c60450fd7","Type":"ContainerDied","Data":"bfb666443a6c803b05c45b96a777ea70ef68a105ef6cd3ebf75f5023510a9b48"} Nov 25 18:23:15 crc kubenswrapper[4926]: I1125 18:23:15.663245 4926 generic.go:334] "Generic (PLEG): container finished" podID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerID="dba22af78ff3543a4c63d6eb7de80ba2fcca3861b92d2eb6026263dafff5f5fd" exitCode=0 Nov 25 18:23:15 crc kubenswrapper[4926]: I1125 18:23:15.663667 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" event={"ID":"c148da96-30bb-41ad-8d86-1a3c60450fd7","Type":"ContainerDied","Data":"dba22af78ff3543a4c63d6eb7de80ba2fcca3861b92d2eb6026263dafff5f5fd"} Nov 25 18:23:16 crc kubenswrapper[4926]: I1125 18:23:16.982387 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.042648 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjlrv\" (UniqueName: \"kubernetes.io/projected/c148da96-30bb-41ad-8d86-1a3c60450fd7-kube-api-access-pjlrv\") pod \"c148da96-30bb-41ad-8d86-1a3c60450fd7\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.042722 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-bundle\") pod \"c148da96-30bb-41ad-8d86-1a3c60450fd7\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.042793 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-util\") pod \"c148da96-30bb-41ad-8d86-1a3c60450fd7\" (UID: \"c148da96-30bb-41ad-8d86-1a3c60450fd7\") " Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.048415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-bundle" (OuterVolumeSpecName: "bundle") pod "c148da96-30bb-41ad-8d86-1a3c60450fd7" (UID: "c148da96-30bb-41ad-8d86-1a3c60450fd7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.051220 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c148da96-30bb-41ad-8d86-1a3c60450fd7-kube-api-access-pjlrv" (OuterVolumeSpecName: "kube-api-access-pjlrv") pod "c148da96-30bb-41ad-8d86-1a3c60450fd7" (UID: "c148da96-30bb-41ad-8d86-1a3c60450fd7"). InnerVolumeSpecName "kube-api-access-pjlrv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.144511 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjlrv\" (UniqueName: \"kubernetes.io/projected/c148da96-30bb-41ad-8d86-1a3c60450fd7-kube-api-access-pjlrv\") on node \"crc\" DevicePath \"\"" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.144556 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.419301 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-util" (OuterVolumeSpecName: "util") pod "c148da96-30bb-41ad-8d86-1a3c60450fd7" (UID: "c148da96-30bb-41ad-8d86-1a3c60450fd7"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.466610 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c148da96-30bb-41ad-8d86-1a3c60450fd7-util\") on node \"crc\" DevicePath \"\"" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.684560 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" event={"ID":"c148da96-30bb-41ad-8d86-1a3c60450fd7","Type":"ContainerDied","Data":"6980562fa0664b0816e685d68ee5b0529802886f79ce0efc91dc6c4763a56f1f"} Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.684617 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6980562fa0664b0816e685d68ee5b0529802886f79ce0efc91dc6c4763a56f1f" Nov 25 18:23:17 crc kubenswrapper[4926]: I1125 18:23:17.684714 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d" Nov 25 18:23:17 crc kubenswrapper[4926]: E1125 18:23:17.804977 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc148da96_30bb_41ad_8d86_1a3c60450fd7.slice/crio-6980562fa0664b0816e685d68ee5b0529802886f79ce0efc91dc6c4763a56f1f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc148da96_30bb_41ad_8d86_1a3c60450fd7.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.581658 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj"] Nov 25 18:23:27 crc kubenswrapper[4926]: E1125 18:23:27.582551 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="pull" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.582566 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="pull" Nov 25 18:23:27 crc kubenswrapper[4926]: E1125 18:23:27.582583 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="util" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.582589 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="util" Nov 25 18:23:27 crc kubenswrapper[4926]: E1125 18:23:27.582599 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="extract" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.582606 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="extract" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.582705 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c148da96-30bb-41ad-8d86-1a3c60450fd7" containerName="extract" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.583122 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.585891 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.586311 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.586482 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-vlb2p" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.596656 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.667796 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnm64\" (UniqueName: \"kubernetes.io/projected/5d4652b0-e12f-40d1-8370-e32b2aa51b96-kube-api-access-mnm64\") pod \"obo-prometheus-operator-668cf9dfbb-km4cj\" (UID: \"5d4652b0-e12f-40d1-8370-e32b2aa51b96\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.708400 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.709240 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.712849 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.713339 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-xl5tr" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.726542 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.731626 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.732869 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.755325 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.770448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4adc4849-d8a6-4eff-8fd9-bf801de1ab33-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-829cx\" (UID: \"4adc4849-d8a6-4eff-8fd9-bf801de1ab33\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.770854 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4adc4849-d8a6-4eff-8fd9-bf801de1ab33-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-829cx\" (UID: \"4adc4849-d8a6-4eff-8fd9-bf801de1ab33\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.770982 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f37f6813-7831-4557-91c3-499fa6f790a9-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh\" (UID: \"f37f6813-7831-4557-91c3-499fa6f790a9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.771094 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f37f6813-7831-4557-91c3-499fa6f790a9-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh\" (UID: \"f37f6813-7831-4557-91c3-499fa6f790a9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.771206 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnm64\" (UniqueName: \"kubernetes.io/projected/5d4652b0-e12f-40d1-8370-e32b2aa51b96-kube-api-access-mnm64\") pod \"obo-prometheus-operator-668cf9dfbb-km4cj\" (UID: \"5d4652b0-e12f-40d1-8370-e32b2aa51b96\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.794214 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnm64\" (UniqueName: \"kubernetes.io/projected/5d4652b0-e12f-40d1-8370-e32b2aa51b96-kube-api-access-mnm64\") pod \"obo-prometheus-operator-668cf9dfbb-km4cj\" (UID: \"5d4652b0-e12f-40d1-8370-e32b2aa51b96\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.872223 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4adc4849-d8a6-4eff-8fd9-bf801de1ab33-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-829cx\" (UID: \"4adc4849-d8a6-4eff-8fd9-bf801de1ab33\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc 
kubenswrapper[4926]: I1125 18:23:27.872827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4adc4849-d8a6-4eff-8fd9-bf801de1ab33-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-829cx\" (UID: \"4adc4849-d8a6-4eff-8fd9-bf801de1ab33\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.872967 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f37f6813-7831-4557-91c3-499fa6f790a9-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh\" (UID: \"f37f6813-7831-4557-91c3-499fa6f790a9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.873081 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f37f6813-7831-4557-91c3-499fa6f790a9-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh\" (UID: \"f37f6813-7831-4557-91c3-499fa6f790a9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.879447 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4adc4849-d8a6-4eff-8fd9-bf801de1ab33-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-829cx\" (UID: \"4adc4849-d8a6-4eff-8fd9-bf801de1ab33\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.883282 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f37f6813-7831-4557-91c3-499fa6f790a9-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh\" (UID: \"f37f6813-7831-4557-91c3-499fa6f790a9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.884008 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4adc4849-d8a6-4eff-8fd9-bf801de1ab33-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-829cx\" (UID: \"4adc4849-d8a6-4eff-8fd9-bf801de1ab33\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.904079 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.906026 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f37f6813-7831-4557-91c3-499fa6f790a9-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh\" (UID: \"f37f6813-7831-4557-91c3-499fa6f790a9\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.934571 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-45q9w"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.935548 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.944302 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-45q9w"] Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.944867 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-2jdbr" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.945213 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.976014 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/48648219-c573-4084-a23b-17ef23df2666-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-45q9w\" (UID: \"48648219-c573-4084-a23b-17ef23df2666\") " pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:27 crc kubenswrapper[4926]: I1125 18:23:27.976080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkzjh\" (UniqueName: \"kubernetes.io/projected/48648219-c573-4084-a23b-17ef23df2666-kube-api-access-hkzjh\") pod \"observability-operator-d8bb48f5d-45q9w\" (UID: \"48648219-c573-4084-a23b-17ef23df2666\") " pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.023549 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.051071 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.077962 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/48648219-c573-4084-a23b-17ef23df2666-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-45q9w\" (UID: \"48648219-c573-4084-a23b-17ef23df2666\") " pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.079996 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkzjh\" (UniqueName: \"kubernetes.io/projected/48648219-c573-4084-a23b-17ef23df2666-kube-api-access-hkzjh\") pod \"observability-operator-d8bb48f5d-45q9w\" (UID: \"48648219-c573-4084-a23b-17ef23df2666\") " pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.095434 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/48648219-c573-4084-a23b-17ef23df2666-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-45q9w\" (UID: \"48648219-c573-4084-a23b-17ef23df2666\") " pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.100952 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkzjh\" (UniqueName: \"kubernetes.io/projected/48648219-c573-4084-a23b-17ef23df2666-kube-api-access-hkzjh\") pod \"observability-operator-d8bb48f5d-45q9w\" (UID: \"48648219-c573-4084-a23b-17ef23df2666\") " pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.143259 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-cq9q8"] Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.144344 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.148865 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-pxc6r" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.168728 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-cq9q8"] Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.184548 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlwqf\" (UniqueName: \"kubernetes.io/projected/42a2f9e2-7492-45ef-9049-b617d5c1c36d-kube-api-access-dlwqf\") pod \"perses-operator-5446b9c989-cq9q8\" (UID: \"42a2f9e2-7492-45ef-9049-b617d5c1c36d\") " pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.184634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/42a2f9e2-7492-45ef-9049-b617d5c1c36d-openshift-service-ca\") pod \"perses-operator-5446b9c989-cq9q8\" (UID: \"42a2f9e2-7492-45ef-9049-b617d5c1c36d\") " pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.286522 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlwqf\" (UniqueName: \"kubernetes.io/projected/42a2f9e2-7492-45ef-9049-b617d5c1c36d-kube-api-access-dlwqf\") pod \"perses-operator-5446b9c989-cq9q8\" (UID: \"42a2f9e2-7492-45ef-9049-b617d5c1c36d\") " pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.286575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/42a2f9e2-7492-45ef-9049-b617d5c1c36d-openshift-service-ca\") pod \"perses-operator-5446b9c989-cq9q8\" (UID: \"42a2f9e2-7492-45ef-9049-b617d5c1c36d\") " pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.287592 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/42a2f9e2-7492-45ef-9049-b617d5c1c36d-openshift-service-ca\") pod \"perses-operator-5446b9c989-cq9q8\" (UID: \"42a2f9e2-7492-45ef-9049-b617d5c1c36d\") " pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.295816 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.336435 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlwqf\" (UniqueName: \"kubernetes.io/projected/42a2f9e2-7492-45ef-9049-b617d5c1c36d-kube-api-access-dlwqf\") pod \"perses-operator-5446b9c989-cq9q8\" (UID: \"42a2f9e2-7492-45ef-9049-b617d5c1c36d\") " pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.503483 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh"] Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.515308 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.519796 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj"] Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.542053 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx"] Nov 25 18:23:28 crc kubenswrapper[4926]: W1125 18:23:28.590500 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4adc4849_d8a6_4eff_8fd9_bf801de1ab33.slice/crio-4fd82ff5484d06c2280077c93f656d068b51471b2013f1c1a0f856d2ed84c4bd WatchSource:0}: Error finding container 4fd82ff5484d06c2280077c93f656d068b51471b2013f1c1a0f856d2ed84c4bd: Status 404 returned error can't find the container with id 4fd82ff5484d06c2280077c93f656d068b51471b2013f1c1a0f856d2ed84c4bd Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.694999 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-45q9w"] Nov 25 18:23:28 crc kubenswrapper[4926]: W1125 18:23:28.714564 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48648219_c573_4084_a23b_17ef23df2666.slice/crio-a680a5fdb9ba2b9f1e6903ffa60b625c46d1d84cf0a72e56b9f42abcf37bdf8c WatchSource:0}: Error finding container a680a5fdb9ba2b9f1e6903ffa60b625c46d1d84cf0a72e56b9f42abcf37bdf8c: Status 404 returned error can't find the container with id a680a5fdb9ba2b9f1e6903ffa60b625c46d1d84cf0a72e56b9f42abcf37bdf8c Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.761269 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" event={"ID":"f37f6813-7831-4557-91c3-499fa6f790a9","Type":"ContainerStarted","Data":"e2057bd75fc41e4c887e77def5f34656bc6821f1b47d7aa64c6cbb5fab6e6460"} Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.762090 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" event={"ID":"4adc4849-d8a6-4eff-8fd9-bf801de1ab33","Type":"ContainerStarted","Data":"4fd82ff5484d06c2280077c93f656d068b51471b2013f1c1a0f856d2ed84c4bd"} Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.763922 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" event={"ID":"5d4652b0-e12f-40d1-8370-e32b2aa51b96","Type":"ContainerStarted","Data":"d2b80d7d44c66e7bd3229bdba8708fa7561bea0cbaa1cc1ac8b8b16ffae47cfd"} Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.765234 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" event={"ID":"48648219-c573-4084-a23b-17ef23df2666","Type":"ContainerStarted","Data":"a680a5fdb9ba2b9f1e6903ffa60b625c46d1d84cf0a72e56b9f42abcf37bdf8c"} Nov 25 18:23:28 crc kubenswrapper[4926]: I1125 18:23:28.815267 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-cq9q8"] Nov 25 18:23:29 crc kubenswrapper[4926]: I1125 18:23:29.792884 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" 
event={"ID":"42a2f9e2-7492-45ef-9049-b617d5c1c36d","Type":"ContainerStarted","Data":"ff7f73109f48f156bf546e45ecf8013719491d922cd868e81ca8805f729a12e8"} Nov 25 18:23:48 crc kubenswrapper[4926]: E1125 18:23:48.376915 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385" Nov 25 18:23:48 crc kubenswrapper[4926]: E1125 18:23:48.378514 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:perses-operator,Image:registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openshift-service-ca,ReadOnly:true,MountPath:/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dlwqf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod perses-operator-5446b9c989-cq9q8_openshift-operators(42a2f9e2-7492-45ef-9049-b617d5c1c36d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 18:23:48 crc kubenswrapper[4926]: E1125 18:23:48.379811 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" podUID="42a2f9e2-7492-45ef-9049-b617d5c1c36d" Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.948044 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" event={"ID":"48648219-c573-4084-a23b-17ef23df2666","Type":"ContainerStarted","Data":"da96c067b6d67039a17a9d6a91f7d69b8a90a06213b76db7f91fd2c24e0468dc"} Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.948606 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.949470 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" event={"ID":"f37f6813-7831-4557-91c3-499fa6f790a9","Type":"ContainerStarted","Data":"c43c8cb823042388bf4a9de59078a8b7e273f19a29dbd8c646694ce5775e90f0"} Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.951084 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" start-of-body= Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.951259 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" event={"ID":"4adc4849-d8a6-4eff-8fd9-bf801de1ab33","Type":"ContainerStarted","Data":"7f1dce420b8e541ed5049328955d1ca9275315187ba57fd69526685d4bbee6c1"} Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.951352 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.954142 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" event={"ID":"5d4652b0-e12f-40d1-8370-e32b2aa51b96","Type":"ContainerStarted","Data":"55ef837df7b272f0a8164ab6f16cf676ac63aa0bb44b96ee0f2ba001a1ca510d"} Nov 25 18:23:48 crc kubenswrapper[4926]: E1125 18:23:48.956194 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385\\\"\"" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" podUID="42a2f9e2-7492-45ef-9049-b617d5c1c36d" Nov 25 18:23:48 crc kubenswrapper[4926]: I1125 18:23:48.972228 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podStartSLOduration=2.212862875 podStartE2EDuration="21.972204081s" podCreationTimestamp="2025-11-25 18:23:27 +0000 UTC" firstStartedPulling="2025-11-25 18:23:28.732523895 +0000 UTC m=+639.118037500" lastFinishedPulling="2025-11-25 18:23:48.491865081 +0000 UTC m=+658.877378706" observedRunningTime="2025-11-25 18:23:48.970111195 +0000 UTC m=+659.355624830" watchObservedRunningTime="2025-11-25 18:23:48.972204081 +0000 UTC m=+659.357717726" Nov 25 18:23:49 crc kubenswrapper[4926]: I1125 18:23:49.011505 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh" podStartSLOduration=2.137958866 podStartE2EDuration="22.01147623s" podCreationTimestamp="2025-11-25 18:23:27 +0000 UTC" firstStartedPulling="2025-11-25 18:23:28.57878426 +0000 UTC m=+638.964297865" lastFinishedPulling="2025-11-25 18:23:48.452301604 +0000 UTC m=+658.837815229" observedRunningTime="2025-11-25 18:23:49.007655187 +0000 UTC m=+659.393168832" watchObservedRunningTime="2025-11-25 18:23:49.01147623 +0000 UTC m=+659.396989865" Nov 25 18:23:49 crc kubenswrapper[4926]: I1125 18:23:49.055876 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7fbd78f954-829cx" podStartSLOduration=2.204107669 podStartE2EDuration="22.055845814s" podCreationTimestamp="2025-11-25 18:23:27 +0000 UTC" firstStartedPulling="2025-11-25 18:23:28.592416342 +0000 UTC m=+638.977929947" lastFinishedPulling="2025-11-25 18:23:48.444154447 +0000 UTC m=+658.829668092" observedRunningTime="2025-11-25 18:23:49.050884812 +0000 UTC m=+659.436398427" watchObservedRunningTime="2025-11-25 18:23:49.055845814 +0000 UTC m=+659.441359459" Nov 25 18:23:49 crc kubenswrapper[4926]: I1125 18:23:49.086192 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-km4cj" podStartSLOduration=2.209185894 podStartE2EDuration="22.086165314s" podCreationTimestamp="2025-11-25 18:23:27 +0000 UTC" firstStartedPulling="2025-11-25 18:23:28.571552484 +0000 UTC m=+638.957066089" lastFinishedPulling="2025-11-25 18:23:48.448531864 +0000 UTC m=+658.834045509" observedRunningTime="2025-11-25 18:23:49.081000916 +0000 UTC m=+659.466514531" watchObservedRunningTime="2025-11-25 18:23:49.086165314 +0000 UTC m=+659.471678939" Nov 25 18:23:49 crc kubenswrapper[4926]: I1125 18:23:49.962441 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 18:24:03 crc kubenswrapper[4926]: I1125 18:24:03.050172 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" event={"ID":"42a2f9e2-7492-45ef-9049-b617d5c1c36d","Type":"ContainerStarted","Data":"16370cf74ea4980ff2116c62d27d4fc84162020107b8933f935bb4cd9de966ad"} Nov 25 18:24:03 crc kubenswrapper[4926]: I1125 18:24:03.051516 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:24:08 crc kubenswrapper[4926]: I1125 18:24:08.519762 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" Nov 25 18:24:08 crc kubenswrapper[4926]: I1125 18:24:08.559995 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" podStartSLOduration=7.354860584 podStartE2EDuration="40.559968712s" podCreationTimestamp="2025-11-25 18:23:28 +0000 UTC" firstStartedPulling="2025-11-25 18:23:28.82675285 +0000 UTC m=+639.212266455" lastFinishedPulling="2025-11-25 18:24:02.031860978 +0000 UTC m=+672.417374583" observedRunningTime="2025-11-25 18:24:03.079983804 +0000 UTC m=+673.465497449" watchObservedRunningTime="2025-11-25 18:24:08.559968712 +0000 UTC m=+678.945482327" Nov 25 18:24:26 crc kubenswrapper[4926]: I1125 18:24:26.917996 4926 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd"] Nov 25 18:24:26 crc kubenswrapper[4926]: I1125 18:24:26.921704 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:26 crc kubenswrapper[4926]: I1125 18:24:26.923777 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 18:24:26 crc kubenswrapper[4926]: I1125 18:24:26.934153 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd"] Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.064362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.064750 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.064946 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5pbs\" (UniqueName: \"kubernetes.io/projected/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-kube-api-access-v5pbs\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.166532 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.166595 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5pbs\" (UniqueName: \"kubernetes.io/projected/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-kube-api-access-v5pbs\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.166660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.167405 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.167554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.190511 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5pbs\" (UniqueName: \"kubernetes.io/projected/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-kube-api-access-v5pbs\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.251511 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:27 crc kubenswrapper[4926]: I1125 18:24:27.522409 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd"] Nov 25 18:24:28 crc kubenswrapper[4926]: I1125 18:24:28.243765 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerID="efe9771488517ddf7f8f25c9de1e056e6021e258d613e8b41167f7122cb5f975" exitCode=0 Nov 25 18:24:28 crc kubenswrapper[4926]: I1125 18:24:28.243822 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" event={"ID":"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131","Type":"ContainerDied","Data":"efe9771488517ddf7f8f25c9de1e056e6021e258d613e8b41167f7122cb5f975"} Nov 25 18:24:28 crc kubenswrapper[4926]: I1125 18:24:28.244339 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" event={"ID":"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131","Type":"ContainerStarted","Data":"c54b5d9d604fc631908f3bdeb454611b9e3e87e57b5f33b40cbb378b91176830"} Nov 25 18:24:30 crc kubenswrapper[4926]: I1125 18:24:30.278340 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerID="43007c8d99a63d2c16068e68d49c7ee3a19276be9ba7186a21d019c567ff99e0" exitCode=0 Nov 25 18:24:30 crc kubenswrapper[4926]: I1125 18:24:30.279042 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" event={"ID":"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131","Type":"ContainerDied","Data":"43007c8d99a63d2c16068e68d49c7ee3a19276be9ba7186a21d019c567ff99e0"} Nov 25 18:24:31 crc kubenswrapper[4926]: I1125 
18:24:31.291456 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerID="1843c154f6aee808702227399874ba46576bda64066f6c1c0eaf734a03118d87" exitCode=0 Nov 25 18:24:31 crc kubenswrapper[4926]: I1125 18:24:31.291547 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" event={"ID":"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131","Type":"ContainerDied","Data":"1843c154f6aee808702227399874ba46576bda64066f6c1c0eaf734a03118d87"} Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.560619 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.671499 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5pbs\" (UniqueName: \"kubernetes.io/projected/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-kube-api-access-v5pbs\") pod \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.671582 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-bundle\") pod \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.671647 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-util\") pod \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\" (UID: \"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131\") " Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.672909 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-bundle" (OuterVolumeSpecName: "bundle") pod "6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" (UID: "6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.678640 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-kube-api-access-v5pbs" (OuterVolumeSpecName: "kube-api-access-v5pbs") pod "6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" (UID: "6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131"). InnerVolumeSpecName "kube-api-access-v5pbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.686298 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-util" (OuterVolumeSpecName: "util") pod "6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" (UID: "6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.773628 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-util\") on node \"crc\" DevicePath \"\"" Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.773667 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5pbs\" (UniqueName: \"kubernetes.io/projected/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-kube-api-access-v5pbs\") on node \"crc\" DevicePath \"\"" Nov 25 18:24:32 crc kubenswrapper[4926]: I1125 18:24:32.773682 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:24:33 crc kubenswrapper[4926]: I1125 18:24:33.312860 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" event={"ID":"6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131","Type":"ContainerDied","Data":"c54b5d9d604fc631908f3bdeb454611b9e3e87e57b5f33b40cbb378b91176830"} Nov 25 18:24:33 crc kubenswrapper[4926]: I1125 18:24:33.312917 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c54b5d9d604fc631908f3bdeb454611b9e3e87e57b5f33b40cbb378b91176830" Nov 25 18:24:33 crc kubenswrapper[4926]: I1125 18:24:33.312968 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.753672 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-588xx"] Nov 25 18:24:38 crc kubenswrapper[4926]: E1125 18:24:38.754765 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="util" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.754780 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="util" Nov 25 18:24:38 crc kubenswrapper[4926]: E1125 18:24:38.754789 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="extract" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.754795 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="extract" Nov 25 18:24:38 crc kubenswrapper[4926]: E1125 18:24:38.754804 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="pull" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.754812 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="pull" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.754974 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131" containerName="extract" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.755412 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.757494 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.757645 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.757800 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-r689z" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.768173 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-588xx"] Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.859341 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m79wc\" (UniqueName: \"kubernetes.io/projected/57273d48-1562-49dc-824b-1dc71dd89583-kube-api-access-m79wc\") pod \"nmstate-operator-557fdffb88-588xx\" (UID: \"57273d48-1562-49dc-824b-1dc71dd89583\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.960914 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m79wc\" (UniqueName: \"kubernetes.io/projected/57273d48-1562-49dc-824b-1dc71dd89583-kube-api-access-m79wc\") pod \"nmstate-operator-557fdffb88-588xx\" (UID: \"57273d48-1562-49dc-824b-1dc71dd89583\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" Nov 25 18:24:38 crc kubenswrapper[4926]: I1125 18:24:38.988491 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m79wc\" (UniqueName: \"kubernetes.io/projected/57273d48-1562-49dc-824b-1dc71dd89583-kube-api-access-m79wc\") pod \"nmstate-operator-557fdffb88-588xx\" (UID: \"57273d48-1562-49dc-824b-1dc71dd89583\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" Nov 25 18:24:39 crc kubenswrapper[4926]: I1125 18:24:39.071658 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" Nov 25 18:24:39 crc kubenswrapper[4926]: I1125 18:24:39.302421 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-588xx"] Nov 25 18:24:39 crc kubenswrapper[4926]: I1125 18:24:39.350938 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" event={"ID":"57273d48-1562-49dc-824b-1dc71dd89583","Type":"ContainerStarted","Data":"d7681abd4a644219fc5e03657c912cf084ba932bfa970a09fa54dd5f1142b350"} Nov 25 18:24:42 crc kubenswrapper[4926]: I1125 18:24:42.376909 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" event={"ID":"57273d48-1562-49dc-824b-1dc71dd89583","Type":"ContainerStarted","Data":"96d110c2d98b52a5d80bd5d4f9e6fc486471a6982aabeac3dfe7881735d00985"} Nov 25 18:24:42 crc kubenswrapper[4926]: I1125 18:24:42.400981 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-588xx" podStartSLOduration=2.064578656 podStartE2EDuration="4.40095865s" podCreationTimestamp="2025-11-25 18:24:38 +0000 UTC" firstStartedPulling="2025-11-25 18:24:39.315048527 +0000 UTC m=+709.700562142" lastFinishedPulling="2025-11-25 18:24:41.651428531 +0000 UTC m=+712.036942136" observedRunningTime="2025-11-25 18:24:42.395987928 +0000 UTC m=+712.781501553" watchObservedRunningTime="2025-11-25 18:24:42.40095865 +0000 UTC m=+712.786472265" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.191255 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.192760 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.196767 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-s44c6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.205381 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.207974 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.210490 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.235962 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.239925 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-pjwht"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.240887 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.247015 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.290294 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.290403 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87lnz\" (UniqueName: \"kubernetes.io/projected/6a9e2b29-259a-4f14-8308-975a8d167ce3-kube-api-access-87lnz\") pod \"nmstate-metrics-5dcf9c57c5-rmxx7\" (UID: \"6a9e2b29-259a-4f14-8308-975a8d167ce3\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.290469 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfm9g\" (UniqueName: \"kubernetes.io/projected/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-kube-api-access-gfm9g\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.378827 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.390758 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392305 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-nmstate-lock\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392383 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-ovs-socket\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392423 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfm9g\" (UniqueName: \"kubernetes.io/projected/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-kube-api-access-gfm9g\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392452 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkqjv\" (UniqueName: \"kubernetes.io/projected/d4ef439a-e50d-43bd-87d1-86fea196f862-kube-api-access-kkqjv\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392500 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-dbus-socket\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392519 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.392554 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87lnz\" (UniqueName: \"kubernetes.io/projected/6a9e2b29-259a-4f14-8308-975a8d167ce3-kube-api-access-87lnz\") pod \"nmstate-metrics-5dcf9c57c5-rmxx7\" (UID: \"6a9e2b29-259a-4f14-8308-975a8d167ce3\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" Nov 25 18:24:48 crc kubenswrapper[4926]: E1125 18:24:48.392850 4926 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 18:24:48 crc kubenswrapper[4926]: E1125 18:24:48.392975 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-tls-key-pair podName:b84b6271-61b8-4bc0-8a1b-e09991c9e6af nodeName:}" failed. No retries permitted until 2025-11-25 18:24:48.89291298 +0000 UTC m=+719.278426585 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-tls-key-pair") pod "nmstate-webhook-6b89b748d8-gblp6" (UID: "b84b6271-61b8-4bc0-8a1b-e09991c9e6af") : secret "openshift-nmstate-webhook" not found Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.394232 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.394555 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-w98f9" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.394778 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.395352 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.421100 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87lnz\" (UniqueName: \"kubernetes.io/projected/6a9e2b29-259a-4f14-8308-975a8d167ce3-kube-api-access-87lnz\") pod \"nmstate-metrics-5dcf9c57c5-rmxx7\" (UID: \"6a9e2b29-259a-4f14-8308-975a8d167ce3\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.435468 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfm9g\" (UniqueName: \"kubernetes.io/projected/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-kube-api-access-gfm9g\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493720 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/45e30b58-3691-41a3-afa2-ba29332f53aa-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493813 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-dbus-socket\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493880 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr6pv\" (UniqueName: \"kubernetes.io/projected/45e30b58-3691-41a3-afa2-ba29332f53aa-kube-api-access-mr6pv\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493925 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-nmstate-lock\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493945 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-ovs-socket\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493961 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/45e30b58-3691-41a3-afa2-ba29332f53aa-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.493990 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkqjv\" (UniqueName: \"kubernetes.io/projected/d4ef439a-e50d-43bd-87d1-86fea196f862-kube-api-access-kkqjv\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.494021 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-nmstate-lock\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.494088 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-ovs-socket\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.494249 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d4ef439a-e50d-43bd-87d1-86fea196f862-dbus-socket\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.511567 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.521484 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkqjv\" (UniqueName: \"kubernetes.io/projected/d4ef439a-e50d-43bd-87d1-86fea196f862-kube-api-access-kkqjv\") pod \"nmstate-handler-pjwht\" (UID: \"d4ef439a-e50d-43bd-87d1-86fea196f862\") " pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.556627 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.589271 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-c8b75d446-w2wqs"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.590430 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.594934 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/45e30b58-3691-41a3-afa2-ba29332f53aa-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.594998 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/45e30b58-3691-41a3-afa2-ba29332f53aa-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.595074 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr6pv\" (UniqueName: \"kubernetes.io/projected/45e30b58-3691-41a3-afa2-ba29332f53aa-kube-api-access-mr6pv\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: E1125 18:24:48.595463 4926 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 18:24:48 crc kubenswrapper[4926]: E1125 18:24:48.595515 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/45e30b58-3691-41a3-afa2-ba29332f53aa-plugin-serving-cert podName:45e30b58-3691-41a3-afa2-ba29332f53aa nodeName:}" failed. No retries permitted until 2025-11-25 18:24:49.095501979 +0000 UTC m=+719.481015584 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/45e30b58-3691-41a3-afa2-ba29332f53aa-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-ftcsp" (UID: "45e30b58-3691-41a3-afa2-ba29332f53aa") : secret "plugin-serving-cert" not found Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.596928 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/45e30b58-3691-41a3-afa2-ba29332f53aa-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.616838 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-c8b75d446-w2wqs"] Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.621831 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr6pv\" (UniqueName: \"kubernetes.io/projected/45e30b58-3691-41a3-afa2-ba29332f53aa-kube-api-access-mr6pv\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.701293 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-service-ca\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.701888 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbm5n\" (UniqueName: \"kubernetes.io/projected/1caf13bd-439a-4b17-9103-830033d4fdd9-kube-api-access-jbm5n\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.701956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-trusted-ca-bundle\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.702042 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-console-config\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.702082 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1caf13bd-439a-4b17-9103-830033d4fdd9-console-oauth-config\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.702116 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1caf13bd-439a-4b17-9103-830033d4fdd9-console-serving-cert\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.702160 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-oauth-serving-cert\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803722 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1caf13bd-439a-4b17-9103-830033d4fdd9-console-oauth-config\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803781 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1caf13bd-439a-4b17-9103-830033d4fdd9-console-serving-cert\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803816 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-oauth-serving-cert\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803846 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-service-ca\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbm5n\" (UniqueName: \"kubernetes.io/projected/1caf13bd-439a-4b17-9103-830033d4fdd9-kube-api-access-jbm5n\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803906 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-trusted-ca-bundle\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.803947 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-console-config\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.805136 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-console-config\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.805477 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-trusted-ca-bundle\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.805748 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-service-ca\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.806186 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1caf13bd-439a-4b17-9103-830033d4fdd9-oauth-serving-cert\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.808426 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1caf13bd-439a-4b17-9103-830033d4fdd9-console-serving-cert\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.808447 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1caf13bd-439a-4b17-9103-830033d4fdd9-console-oauth-config\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.821109 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbm5n\" (UniqueName: \"kubernetes.io/projected/1caf13bd-439a-4b17-9103-830033d4fdd9-kube-api-access-jbm5n\") pod \"console-c8b75d446-w2wqs\" (UID: \"1caf13bd-439a-4b17-9103-830033d4fdd9\") " pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.905700 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.909311 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b84b6271-61b8-4bc0-8a1b-e09991c9e6af-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-gblp6\" (UID: \"b84b6271-61b8-4bc0-8a1b-e09991c9e6af\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:48 crc kubenswrapper[4926]: I1125 18:24:48.913524 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.015203 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7"] Nov 25 18:24:49 crc kubenswrapper[4926]: W1125 18:24:49.021834 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a9e2b29_259a_4f14_8308_975a8d167ce3.slice/crio-802596c0edfc45f65df46cc4bae8e9aa1dc175a60d249e9357aee38ddf812268 WatchSource:0}: Error finding container 802596c0edfc45f65df46cc4bae8e9aa1dc175a60d249e9357aee38ddf812268: Status 404 returned error can't find the container with id 802596c0edfc45f65df46cc4bae8e9aa1dc175a60d249e9357aee38ddf812268 Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.110468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/45e30b58-3691-41a3-afa2-ba29332f53aa-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.118143 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/45e30b58-3691-41a3-afa2-ba29332f53aa-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-ftcsp\" (UID: \"45e30b58-3691-41a3-afa2-ba29332f53aa\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.122009 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.250204 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-c8b75d446-w2wqs"] Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.313441 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.373243 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6"] Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.445629 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-c8b75d446-w2wqs" event={"ID":"1caf13bd-439a-4b17-9103-830033d4fdd9","Type":"ContainerStarted","Data":"0fca08f56b43ec2c9fc5525e081016af662eef49c0ff835c87ef22f6e6490844"} Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.445677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-c8b75d446-w2wqs" event={"ID":"1caf13bd-439a-4b17-9103-830033d4fdd9","Type":"ContainerStarted","Data":"36063d0019cf07e757bc246e633d0110d412e7a4f2584b77574e8669fc46c273"} Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.449845 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" event={"ID":"6a9e2b29-259a-4f14-8308-975a8d167ce3","Type":"ContainerStarted","Data":"802596c0edfc45f65df46cc4bae8e9aa1dc175a60d249e9357aee38ddf812268"} Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.451467 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-pjwht" event={"ID":"d4ef439a-e50d-43bd-87d1-86fea196f862","Type":"ContainerStarted","Data":"7b5821b83103e690c926d1c1e2c99e7e8bba3cf1b103b458096d0c0513446606"} Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.453109 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" event={"ID":"b84b6271-61b8-4bc0-8a1b-e09991c9e6af","Type":"ContainerStarted","Data":"911a7ba7608e76668352b84ff98cda6c7686fdcb9c16e0a3a84b530446e540f1"} Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.472495 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-c8b75d446-w2wqs" podStartSLOduration=1.472465753 podStartE2EDuration="1.472465753s" podCreationTimestamp="2025-11-25 18:24:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:24:49.463807496 +0000 UTC m=+719.849321111" watchObservedRunningTime="2025-11-25 18:24:49.472465753 +0000 UTC m=+719.857979358" Nov 25 18:24:49 crc kubenswrapper[4926]: I1125 18:24:49.569279 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp"] Nov 25 18:24:50 crc kubenswrapper[4926]: I1125 18:24:50.470071 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" event={"ID":"45e30b58-3691-41a3-afa2-ba29332f53aa","Type":"ContainerStarted","Data":"b28b14e07eff9362300d5fbf7e373fa8197cc195ced6cdca0d575b81953c15aa"} Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.500351 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" event={"ID":"b84b6271-61b8-4bc0-8a1b-e09991c9e6af","Type":"ContainerStarted","Data":"461fa78b0ef669775e37ce9d80f64ba9d9723f6bf4d23964fa37381b32a9811f"} Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.501221 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.501527 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" event={"ID":"45e30b58-3691-41a3-afa2-ba29332f53aa","Type":"ContainerStarted","Data":"ee47aa7597fa933d80dd8b626adb67f0cc5f68e5ac8e7a00a9f1040e64ee13cf"} Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.502476 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" event={"ID":"6a9e2b29-259a-4f14-8308-975a8d167ce3","Type":"ContainerStarted","Data":"3d3a67675e36630970f2a47bc29437ae62c7e8fd7b3572fec80ad7e9967a61e6"} Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.504776 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-pjwht" event={"ID":"d4ef439a-e50d-43bd-87d1-86fea196f862","Type":"ContainerStarted","Data":"bd265691efe3ead400a01392679c913e28d29ea61c18ec4b7eba9c38afe67436"} Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.504928 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.530517 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" podStartSLOduration=1.7814183319999999 podStartE2EDuration="6.53049953s" podCreationTimestamp="2025-11-25 18:24:48 +0000 UTC" firstStartedPulling="2025-11-25 18:24:49.388633523 +0000 UTC m=+719.774147128" lastFinishedPulling="2025-11-25 18:24:54.137714681 +0000 UTC m=+724.523228326" observedRunningTime="2025-11-25 18:24:54.529171333 +0000 UTC m=+724.914684938" watchObservedRunningTime="2025-11-25 18:24:54.53049953 +0000 UTC m=+724.916013135" Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.564982 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-ftcsp" podStartSLOduration=2.014585349 podStartE2EDuration="6.564956905s" podCreationTimestamp="2025-11-25 18:24:48 +0000 UTC" firstStartedPulling="2025-11-25 18:24:49.589768312 +0000 UTC m=+719.975281917" lastFinishedPulling="2025-11-25 18:24:54.140139868 +0000 UTC m=+724.525653473" observedRunningTime="2025-11-25 18:24:54.545621214 +0000 UTC m=+724.931134819" watchObservedRunningTime="2025-11-25 18:24:54.564956905 +0000 UTC m=+724.950470520" Nov 25 18:24:54 crc kubenswrapper[4926]: I1125 18:24:54.582719 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-pjwht" podStartSLOduration=1.025393696 podStartE2EDuration="6.582702342s" podCreationTimestamp="2025-11-25 18:24:48 +0000 UTC" firstStartedPulling="2025-11-25 18:24:48.583333716 +0000 UTC m=+718.968847321" lastFinishedPulling="2025-11-25 18:24:54.140642322 +0000 UTC m=+724.526155967" observedRunningTime="2025-11-25 18:24:54.581961922 +0000 UTC m=+724.967475527" watchObservedRunningTime="2025-11-25 18:24:54.582702342 +0000 UTC m=+724.968215947" Nov 25 18:24:56 crc kubenswrapper[4926]: I1125 18:24:56.521456 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" event={"ID":"6a9e2b29-259a-4f14-8308-975a8d167ce3","Type":"ContainerStarted","Data":"46282969a955e2380a7bca211ebc53468f0c5ce61c255900eaeaa624dfd296b9"} Nov 25 18:24:56 crc kubenswrapper[4926]: I1125 18:24:56.542753 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rmxx7" podStartSLOduration=1.244059105 
podStartE2EDuration="8.542730116s" podCreationTimestamp="2025-11-25 18:24:48 +0000 UTC" firstStartedPulling="2025-11-25 18:24:49.030461434 +0000 UTC m=+719.415975039" lastFinishedPulling="2025-11-25 18:24:56.329132445 +0000 UTC m=+726.714646050" observedRunningTime="2025-11-25 18:24:56.540394812 +0000 UTC m=+726.925908677" watchObservedRunningTime="2025-11-25 18:24:56.542730116 +0000 UTC m=+726.928243721" Nov 25 18:24:58 crc kubenswrapper[4926]: I1125 18:24:58.914358 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:58 crc kubenswrapper[4926]: I1125 18:24:58.914848 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:58 crc kubenswrapper[4926]: I1125 18:24:58.922637 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:59 crc kubenswrapper[4926]: I1125 18:24:59.549967 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-c8b75d446-w2wqs" Nov 25 18:24:59 crc kubenswrapper[4926]: I1125 18:24:59.614059 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-597mc"] Nov 25 18:25:03 crc kubenswrapper[4926]: I1125 18:25:03.541357 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:25:03 crc kubenswrapper[4926]: I1125 18:25:03.543606 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:25:03 crc kubenswrapper[4926]: I1125 18:25:03.597764 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-pjwht" Nov 25 18:25:09 crc kubenswrapper[4926]: I1125 18:25:09.130174 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-gblp6" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.196008 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gzmf7"] Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.197072 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerName="controller-manager" containerID="cri-o://f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52" gracePeriod=30 Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.299078 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"] Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.299347 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerName="route-controller-manager" 
containerID="cri-o://b646afd9c46ed02427c6142bcb0b90c40289e89566f0952236bea38f460994ec" gracePeriod=30 Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.638923 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.676904 4926 generic.go:334] "Generic (PLEG): container finished" podID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerID="f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52" exitCode=0 Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.677030 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.677756 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" event={"ID":"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7","Type":"ContainerDied","Data":"f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52"} Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.677794 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gzmf7" event={"ID":"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7","Type":"ContainerDied","Data":"c716366dbb80fb85768ded628f37e048bfce8e03ece8dfbb55e74df861f41f97"} Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.677816 4926 scope.go:117] "RemoveContainer" containerID="f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.683019 4926 generic.go:334] "Generic (PLEG): container finished" podID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerID="b646afd9c46ed02427c6142bcb0b90c40289e89566f0952236bea38f460994ec" exitCode=0 Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.683086 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" event={"ID":"7c13f08b-2870-484a-a06b-e671feb57ac4","Type":"ContainerDied","Data":"b646afd9c46ed02427c6142bcb0b90c40289e89566f0952236bea38f460994ec"} Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.710861 4926 scope.go:117] "RemoveContainer" containerID="f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52" Nov 25 18:25:14 crc kubenswrapper[4926]: E1125 18:25:14.723551 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52\": container with ID starting with f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52 not found: ID does not exist" containerID="f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.723606 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52"} err="failed to get container status \"f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52\": rpc error: code = NotFound desc = could not find container \"f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52\": container with ID starting with f20f8ce2ed18485c557877a195cfa384a49a35565336c36d97fa987a26d63d52 not found: ID does not exist" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.747085 4926 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.752237 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-proxy-ca-bundles\") pod \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.752407 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fphc2\" (UniqueName: \"kubernetes.io/projected/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-kube-api-access-fphc2\") pod \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.752474 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-serving-cert\") pod \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.752568 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-client-ca\") pod \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.752620 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-config\") pod \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\" (UID: \"adb1a428-4c9e-4ac4-91a8-43ace3f49cb7\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.753952 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" (UID: "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.754452 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-client-ca" (OuterVolumeSpecName: "client-ca") pod "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" (UID: "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.754486 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-config" (OuterVolumeSpecName: "config") pod "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" (UID: "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.761388 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" (UID: "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.761689 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-kube-api-access-fphc2" (OuterVolumeSpecName: "kube-api-access-fphc2") pod "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" (UID: "adb1a428-4c9e-4ac4-91a8-43ace3f49cb7"). InnerVolumeSpecName "kube-api-access-fphc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.854573 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13f08b-2870-484a-a06b-e671feb57ac4-serving-cert\") pod \"7c13f08b-2870-484a-a06b-e671feb57ac4\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.854631 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-client-ca\") pod \"7c13f08b-2870-484a-a06b-e671feb57ac4\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.854667 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbjl6\" (UniqueName: \"kubernetes.io/projected/7c13f08b-2870-484a-a06b-e671feb57ac4-kube-api-access-tbjl6\") pod \"7c13f08b-2870-484a-a06b-e671feb57ac4\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.854846 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-config\") pod \"7c13f08b-2870-484a-a06b-e671feb57ac4\" (UID: \"7c13f08b-2870-484a-a06b-e671feb57ac4\") " Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.855214 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fphc2\" (UniqueName: \"kubernetes.io/projected/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-kube-api-access-fphc2\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.855241 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.855252 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.855263 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.855273 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.855942 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-client-ca" (OuterVolumeSpecName: "client-ca") pod 
"7c13f08b-2870-484a-a06b-e671feb57ac4" (UID: "7c13f08b-2870-484a-a06b-e671feb57ac4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.856003 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-config" (OuterVolumeSpecName: "config") pod "7c13f08b-2870-484a-a06b-e671feb57ac4" (UID: "7c13f08b-2870-484a-a06b-e671feb57ac4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.859631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c13f08b-2870-484a-a06b-e671feb57ac4-kube-api-access-tbjl6" (OuterVolumeSpecName: "kube-api-access-tbjl6") pod "7c13f08b-2870-484a-a06b-e671feb57ac4" (UID: "7c13f08b-2870-484a-a06b-e671feb57ac4"). InnerVolumeSpecName "kube-api-access-tbjl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.860759 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c13f08b-2870-484a-a06b-e671feb57ac4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7c13f08b-2870-484a-a06b-e671feb57ac4" (UID: "7c13f08b-2870-484a-a06b-e671feb57ac4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.956693 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.956728 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c13f08b-2870-484a-a06b-e671feb57ac4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.956737 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c13f08b-2870-484a-a06b-e671feb57ac4-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:14 crc kubenswrapper[4926]: I1125 18:25:14.956747 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbjl6\" (UniqueName: \"kubernetes.io/projected/7c13f08b-2870-484a-a06b-e671feb57ac4-kube-api-access-tbjl6\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.013060 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gzmf7"] Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.017341 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gzmf7"] Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.693397 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" event={"ID":"7c13f08b-2870-484a-a06b-e671feb57ac4","Type":"ContainerDied","Data":"e350e93159c1f15587c9fec20503a325dd462b37dec016b697a2acedb2ceab7a"} Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.693466 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.693475 4926 scope.go:117] "RemoveContainer" containerID="b646afd9c46ed02427c6142bcb0b90c40289e89566f0952236bea38f460994ec" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.722679 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"] Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.726175 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-9mnkw"] Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.825566 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj"] Nov 25 18:25:15 crc kubenswrapper[4926]: E1125 18:25:15.825841 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerName="controller-manager" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.825871 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerName="controller-manager" Nov 25 18:25:15 crc kubenswrapper[4926]: E1125 18:25:15.825887 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerName="route-controller-manager" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.825893 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerName="route-controller-manager" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.825991 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" containerName="route-controller-manager" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.826000 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" containerName="controller-manager" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.826440 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.832156 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.832467 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.832762 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.832994 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.833153 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.833510 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.850965 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj"] Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.871722 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41f2dfed-6945-41b4-a28b-c0d57989714b-config\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.871932 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41f2dfed-6945-41b4-a28b-c0d57989714b-serving-cert\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.872123 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41f2dfed-6945-41b4-a28b-c0d57989714b-client-ca\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.872162 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghdvk\" (UniqueName: \"kubernetes.io/projected/41f2dfed-6945-41b4-a28b-c0d57989714b-kube-api-access-ghdvk\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.973975 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41f2dfed-6945-41b4-a28b-c0d57989714b-config\") pod 
\"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.974441 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41f2dfed-6945-41b4-a28b-c0d57989714b-serving-cert\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.975539 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41f2dfed-6945-41b4-a28b-c0d57989714b-client-ca\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.975718 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghdvk\" (UniqueName: \"kubernetes.io/projected/41f2dfed-6945-41b4-a28b-c0d57989714b-kube-api-access-ghdvk\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.976031 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41f2dfed-6945-41b4-a28b-c0d57989714b-config\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.976910 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41f2dfed-6945-41b4-a28b-c0d57989714b-client-ca\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:15 crc kubenswrapper[4926]: I1125 18:25:15.984145 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41f2dfed-6945-41b4-a28b-c0d57989714b-serving-cert\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.002431 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghdvk\" (UniqueName: \"kubernetes.io/projected/41f2dfed-6945-41b4-a28b-c0d57989714b-kube-api-access-ghdvk\") pod \"route-controller-manager-7cb79694b6-fbrqj\" (UID: \"41f2dfed-6945-41b4-a28b-c0d57989714b\") " pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.082838 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f688c6b94-rxxnn"] Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.084289 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.088477 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f688c6b94-rxxnn"] Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.092303 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.092328 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.092893 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.093023 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.093089 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.093258 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.120481 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.142070 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.281487 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-config\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.283264 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r86kp\" (UniqueName: \"kubernetes.io/projected/13668c10-93bb-4198-a221-bee2b2ef685b-kube-api-access-r86kp\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.283357 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-proxy-ca-bundles\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.283408 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-client-ca\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " 
pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.283476 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13668c10-93bb-4198-a221-bee2b2ef685b-serving-cert\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.344879 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c13f08b-2870-484a-a06b-e671feb57ac4" path="/var/lib/kubelet/pods/7c13f08b-2870-484a-a06b-e671feb57ac4/volumes" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.345688 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adb1a428-4c9e-4ac4-91a8-43ace3f49cb7" path="/var/lib/kubelet/pods/adb1a428-4c9e-4ac4-91a8-43ace3f49cb7/volumes" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.376013 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj"] Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.389182 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13668c10-93bb-4198-a221-bee2b2ef685b-serving-cert\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.389318 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-config\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.389364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r86kp\" (UniqueName: \"kubernetes.io/projected/13668c10-93bb-4198-a221-bee2b2ef685b-kube-api-access-r86kp\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.389492 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-proxy-ca-bundles\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.389543 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-client-ca\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.390698 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-client-ca\") pod 
\"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.392123 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-proxy-ca-bundles\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.393392 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13668c10-93bb-4198-a221-bee2b2ef685b-config\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.394387 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13668c10-93bb-4198-a221-bee2b2ef685b-serving-cert\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.412328 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r86kp\" (UniqueName: \"kubernetes.io/projected/13668c10-93bb-4198-a221-bee2b2ef685b-kube-api-access-r86kp\") pod \"controller-manager-5f688c6b94-rxxnn\" (UID: \"13668c10-93bb-4198-a221-bee2b2ef685b\") " pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.426787 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.638063 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f688c6b94-rxxnn"] Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.702757 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" event={"ID":"13668c10-93bb-4198-a221-bee2b2ef685b","Type":"ContainerStarted","Data":"424865b417a34f1a464369c073e31b335e42290396ddbd1813e388005e8a4f51"} Nov 25 18:25:16 crc kubenswrapper[4926]: I1125 18:25:16.703982 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" event={"ID":"41f2dfed-6945-41b4-a28b-c0d57989714b","Type":"ContainerStarted","Data":"f075b82e5c5c645f0e37055acb4b0341a1b425cbc6a117bd37f9a152c00baf2d"} Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.723853 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" event={"ID":"13668c10-93bb-4198-a221-bee2b2ef685b","Type":"ContainerStarted","Data":"0d9f56399528f654f4e3fca7a64031d4b909cad493dd7f216a1e4b66acd6b753"} Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.724602 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.730859 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.734120 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" event={"ID":"41f2dfed-6945-41b4-a28b-c0d57989714b","Type":"ContainerStarted","Data":"8f1067fe627454344a10f2555c3ed971ffe3d6abd3715217b62f43e0610ee040"} Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.737036 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.771750 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podStartSLOduration=3.771720568 podStartE2EDuration="3.771720568s" podCreationTimestamp="2025-11-25 18:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:25:17.749781936 +0000 UTC m=+748.135295541" watchObservedRunningTime="2025-11-25 18:25:17.771720568 +0000 UTC m=+748.157234173" Nov 25 18:25:17 crc kubenswrapper[4926]: I1125 18:25:17.773885 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" podStartSLOduration=2.773871218 podStartE2EDuration="2.773871218s" podCreationTimestamp="2025-11-25 18:25:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:25:17.768072689 +0000 UTC m=+748.153586294" watchObservedRunningTime="2025-11-25 18:25:17.773871218 +0000 UTC m=+748.159384843" Nov 25 18:25:18 crc kubenswrapper[4926]: I1125 
18:25:18.016320 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7cb79694b6-fbrqj" Nov 25 18:25:24 crc kubenswrapper[4926]: I1125 18:25:24.663762 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-597mc" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerName="console" containerID="cri-o://1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287" gracePeriod=15 Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.209598 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-597mc_e67bd5e5-a3c9-4576-93e6-6d7073142160/console/0.log" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.210051 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.247086 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvvjj\" (UniqueName: \"kubernetes.io/projected/e67bd5e5-a3c9-4576-93e6-6d7073142160-kube-api-access-cvvjj\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.247165 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-serving-cert\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.247222 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-oauth-config\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.247245 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-config\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.247275 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-trusted-ca-bundle\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.247436 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-oauth-serving-cert\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248200 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-config" (OuterVolumeSpecName: "console-config") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248215 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248219 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248269 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-service-ca\") pod \"e67bd5e5-a3c9-4576-93e6-6d7073142160\" (UID: \"e67bd5e5-a3c9-4576-93e6-6d7073142160\") " Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248530 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-service-ca" (OuterVolumeSpecName: "service-ca") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248974 4926 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.248994 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.249004 4926 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.249015 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e67bd5e5-a3c9-4576-93e6-6d7073142160-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.258543 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.263402 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.268721 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e67bd5e5-a3c9-4576-93e6-6d7073142160-kube-api-access-cvvjj" (OuterVolumeSpecName: "kube-api-access-cvvjj") pod "e67bd5e5-a3c9-4576-93e6-6d7073142160" (UID: "e67bd5e5-a3c9-4576-93e6-6d7073142160"). InnerVolumeSpecName "kube-api-access-cvvjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.350645 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvvjj\" (UniqueName: \"kubernetes.io/projected/e67bd5e5-a3c9-4576-93e6-6d7073142160-kube-api-access-cvvjj\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.350686 4926 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.350696 4926 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e67bd5e5-a3c9-4576-93e6-6d7073142160-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.794964 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-597mc_e67bd5e5-a3c9-4576-93e6-6d7073142160/console/0.log" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.795025 4926 generic.go:334] "Generic (PLEG): container finished" podID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerID="1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287" exitCode=2 Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.795080 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-597mc" event={"ID":"e67bd5e5-a3c9-4576-93e6-6d7073142160","Type":"ContainerDied","Data":"1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287"} Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.795116 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-597mc" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.795139 4926 scope.go:117] "RemoveContainer" containerID="1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.795122 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-597mc" event={"ID":"e67bd5e5-a3c9-4576-93e6-6d7073142160","Type":"ContainerDied","Data":"7e8c00e1d22bcb05f3bb0c40bfcf4c745ce7dbcf2a8fe9c5a632de959accb9d0"} Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.822467 4926 scope.go:117] "RemoveContainer" containerID="1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287" Nov 25 18:25:25 crc kubenswrapper[4926]: E1125 18:25:25.823361 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287\": container with ID starting with 1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287 not found: ID does not exist" containerID="1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.823441 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287"} err="failed to get container status \"1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287\": rpc error: code = NotFound desc = could not find container \"1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287\": container with ID starting with 1d1019ef6e932170eee251b7b3565cf86cd34613a9a75147012e2a013a2c2287 not found: ID does not exist" Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.834857 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-597mc"] Nov 25 18:25:25 crc kubenswrapper[4926]: I1125 18:25:25.838480 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-597mc"] Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.127631 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r"] Nov 25 18:25:26 crc kubenswrapper[4926]: E1125 18:25:26.128024 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerName="console" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.128046 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerName="console" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.128237 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" containerName="console" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.129692 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.133069 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.139675 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r"] Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.162630 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.162718 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frmkw\" (UniqueName: \"kubernetes.io/projected/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-kube-api-access-frmkw\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.162763 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.264506 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frmkw\" (UniqueName: \"kubernetes.io/projected/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-kube-api-access-frmkw\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.264576 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.264630 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.265281 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.265323 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.286216 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frmkw\" (UniqueName: \"kubernetes.io/projected/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-kube-api-access-frmkw\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.343413 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e67bd5e5-a3c9-4576-93e6-6d7073142160" path="/var/lib/kubelet/pods/e67bd5e5-a3c9-4576-93e6-6d7073142160/volumes" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.446984 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.493782 4926 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 18:25:26 crc kubenswrapper[4926]: I1125 18:25:26.872704 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r"] Nov 25 18:25:27 crc kubenswrapper[4926]: I1125 18:25:27.819933 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerID="ac1e1a305bcd1048bea488d7850076e98da545983089c501f345ace7b0f9643f" exitCode=0 Nov 25 18:25:27 crc kubenswrapper[4926]: I1125 18:25:27.820003 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" event={"ID":"8b777648-39b0-4f4a-a63e-b4ee135ee3cd","Type":"ContainerDied","Data":"ac1e1a305bcd1048bea488d7850076e98da545983089c501f345ace7b0f9643f"} Nov 25 18:25:27 crc kubenswrapper[4926]: I1125 18:25:27.820295 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" event={"ID":"8b777648-39b0-4f4a-a63e-b4ee135ee3cd","Type":"ContainerStarted","Data":"0bd11640cc5e2449050a8418aac963e46362f8fd087f4992497b94004de7e521"} Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.474540 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2xgkn"] Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.476216 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.532507 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-catalog-content\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.532944 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76csj\" (UniqueName: \"kubernetes.io/projected/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-kube-api-access-76csj\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.533092 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-utilities\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.538397 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xgkn"] Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.634550 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-utilities\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.634607 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-catalog-content\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.634667 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76csj\" (UniqueName: \"kubernetes.io/projected/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-kube-api-access-76csj\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.635225 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-utilities\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.635364 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-catalog-content\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.663318 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-76csj\" (UniqueName: \"kubernetes.io/projected/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-kube-api-access-76csj\") pod \"redhat-operators-2xgkn\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.795632 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.864878 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerID="81ac2906ca3e6a1b7ba507b41017ce6e3c81a2c1b584cefad3fbaac9366bfc8b" exitCode=0 Nov 25 18:25:29 crc kubenswrapper[4926]: I1125 18:25:29.865423 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" event={"ID":"8b777648-39b0-4f4a-a63e-b4ee135ee3cd","Type":"ContainerDied","Data":"81ac2906ca3e6a1b7ba507b41017ce6e3c81a2c1b584cefad3fbaac9366bfc8b"} Nov 25 18:25:30 crc kubenswrapper[4926]: I1125 18:25:30.291112 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xgkn"] Nov 25 18:25:30 crc kubenswrapper[4926]: I1125 18:25:30.873355 4926 generic.go:334] "Generic (PLEG): container finished" podID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerID="7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7" exitCode=0 Nov 25 18:25:30 crc kubenswrapper[4926]: I1125 18:25:30.873427 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerDied","Data":"7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7"} Nov 25 18:25:30 crc kubenswrapper[4926]: I1125 18:25:30.873500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerStarted","Data":"ae057bd3b064764b4afb08caddbbefb46301b4d8e4f2ff452660eee4a3c185da"} Nov 25 18:25:30 crc kubenswrapper[4926]: I1125 18:25:30.878803 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerID="4efc0408841e8ce6341394858611fb5f15cb9470538fe147cbad7017040b03c5" exitCode=0 Nov 25 18:25:30 crc kubenswrapper[4926]: I1125 18:25:30.878854 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" event={"ID":"8b777648-39b0-4f4a-a63e-b4ee135ee3cd","Type":"ContainerDied","Data":"4efc0408841e8ce6341394858611fb5f15cb9470538fe147cbad7017040b03c5"} Nov 25 18:25:31 crc kubenswrapper[4926]: I1125 18:25:31.889705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerStarted","Data":"17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df"} Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.322070 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.381043 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-bundle\") pod \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.381171 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-util\") pod \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.381265 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frmkw\" (UniqueName: \"kubernetes.io/projected/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-kube-api-access-frmkw\") pod \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\" (UID: \"8b777648-39b0-4f4a-a63e-b4ee135ee3cd\") " Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.382594 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-bundle" (OuterVolumeSpecName: "bundle") pod "8b777648-39b0-4f4a-a63e-b4ee135ee3cd" (UID: "8b777648-39b0-4f4a-a63e-b4ee135ee3cd"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.390727 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-kube-api-access-frmkw" (OuterVolumeSpecName: "kube-api-access-frmkw") pod "8b777648-39b0-4f4a-a63e-b4ee135ee3cd" (UID: "8b777648-39b0-4f4a-a63e-b4ee135ee3cd"). InnerVolumeSpecName "kube-api-access-frmkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.442544 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-util" (OuterVolumeSpecName: "util") pod "8b777648-39b0-4f4a-a63e-b4ee135ee3cd" (UID: "8b777648-39b0-4f4a-a63e-b4ee135ee3cd"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.483258 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.483310 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-util\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.483331 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frmkw\" (UniqueName: \"kubernetes.io/projected/8b777648-39b0-4f4a-a63e-b4ee135ee3cd-kube-api-access-frmkw\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.904235 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" event={"ID":"8b777648-39b0-4f4a-a63e-b4ee135ee3cd","Type":"ContainerDied","Data":"0bd11640cc5e2449050a8418aac963e46362f8fd087f4992497b94004de7e521"} Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.904293 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bd11640cc5e2449050a8418aac963e46362f8fd087f4992497b94004de7e521" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.904330 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r" Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.907274 4926 generic.go:334] "Generic (PLEG): container finished" podID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerID="17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df" exitCode=0 Nov 25 18:25:32 crc kubenswrapper[4926]: I1125 18:25:32.907350 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerDied","Data":"17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df"} Nov 25 18:25:33 crc kubenswrapper[4926]: I1125 18:25:33.542133 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:25:33 crc kubenswrapper[4926]: I1125 18:25:33.542228 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:25:33 crc kubenswrapper[4926]: I1125 18:25:33.920989 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerStarted","Data":"38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a"} Nov 25 18:25:33 crc kubenswrapper[4926]: I1125 18:25:33.944482 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2xgkn" podStartSLOduration=2.375473662 podStartE2EDuration="4.944458546s" 
podCreationTimestamp="2025-11-25 18:25:29 +0000 UTC" firstStartedPulling="2025-11-25 18:25:30.876077828 +0000 UTC m=+761.261591433" lastFinishedPulling="2025-11-25 18:25:33.445062702 +0000 UTC m=+763.830576317" observedRunningTime="2025-11-25 18:25:33.941929946 +0000 UTC m=+764.327443571" watchObservedRunningTime="2025-11-25 18:25:33.944458546 +0000 UTC m=+764.329972161" Nov 25 18:25:39 crc kubenswrapper[4926]: I1125 18:25:39.796235 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:39 crc kubenswrapper[4926]: I1125 18:25:39.796724 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:39 crc kubenswrapper[4926]: I1125 18:25:39.839514 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:40 crc kubenswrapper[4926]: I1125 18:25:40.011636 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.238490 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8"] Nov 25 18:25:43 crc kubenswrapper[4926]: E1125 18:25:43.239510 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="pull" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.239524 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="pull" Nov 25 18:25:43 crc kubenswrapper[4926]: E1125 18:25:43.239542 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="util" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.239548 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="util" Nov 25 18:25:43 crc kubenswrapper[4926]: E1125 18:25:43.239559 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="extract" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.239565 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="extract" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.239671 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b777648-39b0-4f4a-a63e-b4ee135ee3cd" containerName="extract" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.240090 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.242106 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.242796 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.242918 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.243558 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.252391 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-7ss9r" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.265823 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2xgkn"] Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.266091 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2xgkn" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="registry-server" containerID="cri-o://38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a" gracePeriod=2 Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.283811 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8"] Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.366578 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ac11a24-0681-41d4-b943-8bf5b5396a40-webhook-cert\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.366869 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmh6m\" (UniqueName: \"kubernetes.io/projected/2ac11a24-0681-41d4-b943-8bf5b5396a40-kube-api-access-tmh6m\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.367138 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ac11a24-0681-41d4-b943-8bf5b5396a40-apiservice-cert\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.468681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ac11a24-0681-41d4-b943-8bf5b5396a40-apiservice-cert\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " 
pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.469041 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ac11a24-0681-41d4-b943-8bf5b5396a40-webhook-cert\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.469144 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmh6m\" (UniqueName: \"kubernetes.io/projected/2ac11a24-0681-41d4-b943-8bf5b5396a40-kube-api-access-tmh6m\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.482514 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ac11a24-0681-41d4-b943-8bf5b5396a40-apiservice-cert\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.494060 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ac11a24-0681-41d4-b943-8bf5b5396a40-webhook-cert\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.494646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmh6m\" (UniqueName: \"kubernetes.io/projected/2ac11a24-0681-41d4-b943-8bf5b5396a40-kube-api-access-tmh6m\") pod \"metallb-operator-controller-manager-5c55bddd9c-5nmb8\" (UID: \"2ac11a24-0681-41d4-b943-8bf5b5396a40\") " pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.556087 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.708070 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"] Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.709247 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.719334 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.719699 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.719806 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-2jhmx" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.729908 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"] Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.778810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-webhook-cert\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.778907 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tws5l\" (UniqueName: \"kubernetes.io/projected/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-kube-api-access-tws5l\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.778943 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-apiservice-cert\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.880562 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tws5l\" (UniqueName: \"kubernetes.io/projected/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-kube-api-access-tws5l\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.880617 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-apiservice-cert\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.880678 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-webhook-cert\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 
Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.887543 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-webhook-cert\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"
Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.888872 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-apiservice-cert\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"
Nov 25 18:25:43 crc kubenswrapper[4926]: I1125 18:25:43.898529 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tws5l\" (UniqueName: \"kubernetes.io/projected/ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0-kube-api-access-tws5l\") pod \"metallb-operator-webhook-server-69bf9645b5-fww29\" (UID: \"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0\") " pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"
Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.039170 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"
Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.068935 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8"]
Nov 25 18:25:44 crc kubenswrapper[4926]: W1125 18:25:44.102233 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ac11a24_0681_41d4_b943_8bf5b5396a40.slice/crio-da3e8d6a66adf0ac772528f0dbccf11ec83d7088baa2783be8dab774888f4fb2 WatchSource:0}: Error finding container da3e8d6a66adf0ac772528f0dbccf11ec83d7088baa2783be8dab774888f4fb2: Status 404 returned error can't find the container with id da3e8d6a66adf0ac772528f0dbccf11ec83d7088baa2783be8dab774888f4fb2
Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.464546 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29"]
Nov 25 18:25:44 crc kubenswrapper[4926]: W1125 18:25:44.473824 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce5ab3f1_def6_4a51_8a7a_4511cfe62bf0.slice/crio-1462e5c0a2f6420a360c271f0e081fd82b47651a1a6adb3195204c8b8e54c3a4 WatchSource:0}: Error finding container 1462e5c0a2f6420a360c271f0e081fd82b47651a1a6adb3195204c8b8e54c3a4: Status 404 returned error can't find the container with id 1462e5c0a2f6420a360c271f0e081fd82b47651a1a6adb3195204c8b8e54c3a4
Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.940258 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgkn"
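[editor's note] Each VerifyControllerAttachedVolume -> MountVolume -> MountVolume.SetUp run above corresponds to one volume declared in the pod spec. A minimal sketch of how the webhook-cert secret volume seen here might be wired, using the k8s.io/api Go types; the volume and secret names come from the log, while the container name and mount path are assumptions (the path is controller-runtime's usual default):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name: "webhook-cert",
				VolumeSource: corev1.VolumeSource{
					// Secret name taken from the reflector line above.
					Secret: &corev1.SecretVolumeSource{SecretName: "metallb-operator-webhook-server-cert"},
				},
			}},
			Containers: []corev1.Container{{
				Name: "manager", // assumed container name
				VolumeMounts: []corev1.VolumeMount{{
					Name:     "webhook-cert",
					ReadOnly: true,
					// Assumed mount path; not recorded in the log.
					MountPath: "/tmp/k8s-webhook-server/serving-certs",
				}},
			}},
		},
	}
	fmt.Println(pod.Spec.Volumes[0].VolumeSource.Secret.SecretName)
}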
Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.993687 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-catalog-content\") pod \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.993801 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-utilities\") pod \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.993870 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76csj\" (UniqueName: \"kubernetes.io/projected/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-kube-api-access-76csj\") pod \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\" (UID: \"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d\") " Nov 25 18:25:44 crc kubenswrapper[4926]: I1125 18:25:44.995349 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-utilities" (OuterVolumeSpecName: "utilities") pod "84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" (UID: "84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.004010 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-kube-api-access-76csj" (OuterVolumeSpecName: "kube-api-access-76csj") pod "84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" (UID: "84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d"). InnerVolumeSpecName "kube-api-access-76csj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.006298 4926 generic.go:334] "Generic (PLEG): container finished" podID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerID="38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a" exitCode=0 Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.006387 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerDied","Data":"38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a"} Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.006425 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgkn" event={"ID":"84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d","Type":"ContainerDied","Data":"ae057bd3b064764b4afb08caddbbefb46301b4d8e4f2ff452660eee4a3c185da"} Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.006446 4926 scope.go:117] "RemoveContainer" containerID="38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.006588 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgkn" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.009600 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" event={"ID":"2ac11a24-0681-41d4-b943-8bf5b5396a40","Type":"ContainerStarted","Data":"da3e8d6a66adf0ac772528f0dbccf11ec83d7088baa2783be8dab774888f4fb2"} Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.016055 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" event={"ID":"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0","Type":"ContainerStarted","Data":"1462e5c0a2f6420a360c271f0e081fd82b47651a1a6adb3195204c8b8e54c3a4"} Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.033161 4926 scope.go:117] "RemoveContainer" containerID="17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.055976 4926 scope.go:117] "RemoveContainer" containerID="7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.077409 4926 scope.go:117] "RemoveContainer" containerID="38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a" Nov 25 18:25:45 crc kubenswrapper[4926]: E1125 18:25:45.077865 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a\": container with ID starting with 38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a not found: ID does not exist" containerID="38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.077895 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a"} err="failed to get container status \"38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a\": rpc error: code = NotFound desc = could not find container \"38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a\": container with ID starting with 38ef0a759b5ab02316d8ed46576a9bfae91f8b94816cf5681a31da2a1c1dbf7a not found: ID does not exist" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.077916 4926 scope.go:117] "RemoveContainer" containerID="17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df" Nov 25 18:25:45 crc kubenswrapper[4926]: E1125 18:25:45.078278 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df\": container with ID starting with 17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df not found: ID does not exist" containerID="17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.078297 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df"} err="failed to get container status \"17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df\": rpc error: code = NotFound desc = could not find container \"17bd0ce2aad183eba029dc51eb117fec62013dcfa115985b019a4016a1e9e6df\": container with ID starting with 
Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.078308 4926 scope.go:117] "RemoveContainer" containerID="7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7"
Nov 25 18:25:45 crc kubenswrapper[4926]: E1125 18:25:45.078550 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7\": container with ID starting with 7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7 not found: ID does not exist" containerID="7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7"
Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.078565 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7"} err="failed to get container status \"7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7\": rpc error: code = NotFound desc = could not find container \"7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7\": container with ID starting with 7950f036c010ac900301318ce670a7a1ee4a64e6cf3016312f33f230d4dd1cd7 not found: ID does not exist"
Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.095215 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.095282 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76csj\" (UniqueName: \"kubernetes.io/projected/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-kube-api-access-76csj\") on node \"crc\" DevicePath \"\""
Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.125933 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" (UID: "84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
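[editor's note] The RemoveContainer / "ContainerStatus from runtime service failed" pairs above are a benign race: the container was already deleted, so the CRI lookup fails with gRPC NotFound and the kubelet logs the error and moves on. A sketch of that tolerate-NotFound pattern; the removeContainer helper is hypothetical, only the gRPC status handling is the point:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats a NotFound from the runtime as "already gone"
// rather than a real failure, mirroring the log lines above.
func removeContainer(remove func(id string) error, id string) error {
	if err := remove(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // already deleted; nothing to do
		}
		return fmt.Errorf("remove container %s: %w", id, err)
	}
	return nil
}

func main() {
	alreadyGone := func(string) error {
		return status.Error(codes.NotFound, "could not find container")
	}
	fmt.Println(removeContainer(alreadyGone, "38ef0a75")) // prints <nil>
}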
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.201817 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.337835 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2xgkn"] Nov 25 18:25:45 crc kubenswrapper[4926]: I1125 18:25:45.343360 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2xgkn"] Nov 25 18:25:46 crc kubenswrapper[4926]: I1125 18:25:46.351725 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" path="/var/lib/kubelet/pods/84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d/volumes" Nov 25 18:25:50 crc kubenswrapper[4926]: I1125 18:25:50.052916 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" event={"ID":"2ac11a24-0681-41d4-b943-8bf5b5396a40","Type":"ContainerStarted","Data":"d6632cfdd84fbaaf1d498e5e9ca0d8acba4a3a5af14fa8361648562c9806ecf6"} Nov 25 18:25:50 crc kubenswrapper[4926]: I1125 18:25:50.054290 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:25:50 crc kubenswrapper[4926]: I1125 18:25:50.055063 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" event={"ID":"ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0","Type":"ContainerStarted","Data":"e30b5b9a51ba0f47d091a878edfedcc7a75b4593ff9a94ba06a25afbf56639ef"} Nov 25 18:25:50 crc kubenswrapper[4926]: I1125 18:25:50.055245 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:25:50 crc kubenswrapper[4926]: I1125 18:25:50.107309 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" podStartSLOduration=2.143930627 podStartE2EDuration="7.107286103s" podCreationTimestamp="2025-11-25 18:25:43 +0000 UTC" firstStartedPulling="2025-11-25 18:25:44.478338349 +0000 UTC m=+774.863851954" lastFinishedPulling="2025-11-25 18:25:49.441693825 +0000 UTC m=+779.827207430" observedRunningTime="2025-11-25 18:25:50.103619622 +0000 UTC m=+780.489133227" watchObservedRunningTime="2025-11-25 18:25:50.107286103 +0000 UTC m=+780.492799708" Nov 25 18:25:50 crc kubenswrapper[4926]: I1125 18:25:50.108658 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" podStartSLOduration=1.7914952149999999 podStartE2EDuration="7.10864768s" podCreationTimestamp="2025-11-25 18:25:43 +0000 UTC" firstStartedPulling="2025-11-25 18:25:44.106174517 +0000 UTC m=+774.491688122" lastFinishedPulling="2025-11-25 18:25:49.423326982 +0000 UTC m=+779.808840587" observedRunningTime="2025-11-25 18:25:50.082792002 +0000 UTC m=+780.468305607" watchObservedRunningTime="2025-11-25 18:25:50.10864768 +0000 UTC m=+780.494161285" Nov 25 18:26:03 crc kubenswrapper[4926]: I1125 18:26:03.541715 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:26:03 crc kubenswrapper[4926]: I1125 18:26:03.542485 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:26:03 crc kubenswrapper[4926]: I1125 18:26:03.542531 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:26:03 crc kubenswrapper[4926]: I1125 18:26:03.543125 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fe9604ac0593158a6b911432198090e6a5ba75c5f094643a6db976009ac5d9c3"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:26:03 crc kubenswrapper[4926]: I1125 18:26:03.543182 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://fe9604ac0593158a6b911432198090e6a5ba75c5f094643a6db976009ac5d9c3" gracePeriod=600 Nov 25 18:26:04 crc kubenswrapper[4926]: I1125 18:26:04.044766 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-69bf9645b5-fww29" Nov 25 18:26:04 crc kubenswrapper[4926]: I1125 18:26:04.152213 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="fe9604ac0593158a6b911432198090e6a5ba75c5f094643a6db976009ac5d9c3" exitCode=0 Nov 25 18:26:04 crc kubenswrapper[4926]: I1125 18:26:04.152274 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"fe9604ac0593158a6b911432198090e6a5ba75c5f094643a6db976009ac5d9c3"} Nov 25 18:26:04 crc kubenswrapper[4926]: I1125 18:26:04.152308 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"2a18d78481ed56ee9bbb8c78eb19b76e596d9f89645390e0fbbcd5362fea71a4"} Nov 25 18:26:04 crc kubenswrapper[4926]: I1125 18:26:04.152331 4926 scope.go:117] "RemoveContainer" containerID="4780239f9864310c55b02fee7ce2fe9b6cc7572aec239cace54a7899ca327d6e" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.186670 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ks968"] Nov 25 18:26:13 crc kubenswrapper[4926]: E1125 18:26:13.187990 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="extract-content" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.188010 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="extract-content" Nov 25 18:26:13 crc kubenswrapper[4926]: E1125 18:26:13.188030 4926 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="extract-utilities" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.188038 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="extract-utilities" Nov 25 18:26:13 crc kubenswrapper[4926]: E1125 18:26:13.188061 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="registry-server" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.188069 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="registry-server" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.188225 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="84162bb2-3dcb-4b0e-b394-0f2c4b77fd0d" containerName="registry-server" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.189334 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.216256 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ks968"] Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.234852 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-catalog-content\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.235028 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-utilities\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.235116 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvjg5\" (UniqueName: \"kubernetes.io/projected/5e60091b-d6b5-481a-82ab-36849df8e5fb-kube-api-access-tvjg5\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.336278 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-catalog-content\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.336364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-utilities\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.336412 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvjg5\" (UniqueName: \"kubernetes.io/projected/5e60091b-d6b5-481a-82ab-36849df8e5fb-kube-api-access-tvjg5\") pod 
\"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.337019 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-catalog-content\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.337019 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-utilities\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.356591 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvjg5\" (UniqueName: \"kubernetes.io/projected/5e60091b-d6b5-481a-82ab-36849df8e5fb-kube-api-access-tvjg5\") pod \"community-operators-ks968\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:13 crc kubenswrapper[4926]: I1125 18:26:13.544567 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.054864 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ks968"] Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.234631 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerStarted","Data":"fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab"} Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.235210 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerStarted","Data":"85214d4412075805fde3509cfd3898a74d6c9400867d7c3ad1ff232e0411008e"} Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.578510 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rzl7m"] Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.580310 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.594462 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rzl7m"] Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.755300 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-catalog-content\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.755360 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x27zj\" (UniqueName: \"kubernetes.io/projected/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-kube-api-access-x27zj\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.755737 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-utilities\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.857016 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-utilities\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.857140 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-catalog-content\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.857175 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x27zj\" (UniqueName: \"kubernetes.io/projected/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-kube-api-access-x27zj\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.857668 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-utilities\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.857776 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-catalog-content\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.890436 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-x27zj\" (UniqueName: \"kubernetes.io/projected/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-kube-api-access-x27zj\") pod \"redhat-marketplace-rzl7m\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:14 crc kubenswrapper[4926]: I1125 18:26:14.897989 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:15 crc kubenswrapper[4926]: I1125 18:26:15.129173 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rzl7m"] Nov 25 18:26:15 crc kubenswrapper[4926]: I1125 18:26:15.242517 4926 generic.go:334] "Generic (PLEG): container finished" podID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerID="fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab" exitCode=0 Nov 25 18:26:15 crc kubenswrapper[4926]: I1125 18:26:15.242604 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerDied","Data":"fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab"} Nov 25 18:26:15 crc kubenswrapper[4926]: I1125 18:26:15.247041 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rzl7m" event={"ID":"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2","Type":"ContainerStarted","Data":"0e05074062dc1d158bb8efbe20d593c211b91c0ef1ebcf4a61aa23844be9befe"} Nov 25 18:26:16 crc kubenswrapper[4926]: I1125 18:26:16.256212 4926 generic.go:334] "Generic (PLEG): container finished" podID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerID="e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6" exitCode=0 Nov 25 18:26:16 crc kubenswrapper[4926]: I1125 18:26:16.256336 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerDied","Data":"e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6"} Nov 25 18:26:16 crc kubenswrapper[4926]: I1125 18:26:16.257902 4926 generic.go:334] "Generic (PLEG): container finished" podID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerID="0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d" exitCode=0 Nov 25 18:26:16 crc kubenswrapper[4926]: I1125 18:26:16.257957 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rzl7m" event={"ID":"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2","Type":"ContainerDied","Data":"0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d"} Nov 25 18:26:17 crc kubenswrapper[4926]: I1125 18:26:17.270906 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerStarted","Data":"ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236"} Nov 25 18:26:17 crc kubenswrapper[4926]: I1125 18:26:17.273833 4926 generic.go:334] "Generic (PLEG): container finished" podID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerID="d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4" exitCode=0 Nov 25 18:26:17 crc kubenswrapper[4926]: I1125 18:26:17.273908 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rzl7m" 
event={"ID":"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2","Type":"ContainerDied","Data":"d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4"} Nov 25 18:26:17 crc kubenswrapper[4926]: I1125 18:26:17.317907 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ks968" podStartSLOduration=2.865832068 podStartE2EDuration="4.317886166s" podCreationTimestamp="2025-11-25 18:26:13 +0000 UTC" firstStartedPulling="2025-11-25 18:26:15.244653126 +0000 UTC m=+805.630166731" lastFinishedPulling="2025-11-25 18:26:16.696707184 +0000 UTC m=+807.082220829" observedRunningTime="2025-11-25 18:26:17.296570181 +0000 UTC m=+807.682083796" watchObservedRunningTime="2025-11-25 18:26:17.317886166 +0000 UTC m=+807.703399761" Nov 25 18:26:19 crc kubenswrapper[4926]: I1125 18:26:19.287954 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rzl7m" event={"ID":"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2","Type":"ContainerStarted","Data":"0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c"} Nov 25 18:26:19 crc kubenswrapper[4926]: I1125 18:26:19.309955 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rzl7m" podStartSLOduration=3.121767172 podStartE2EDuration="5.309927141s" podCreationTimestamp="2025-11-25 18:26:14 +0000 UTC" firstStartedPulling="2025-11-25 18:26:16.260602655 +0000 UTC m=+806.646116260" lastFinishedPulling="2025-11-25 18:26:18.448762614 +0000 UTC m=+808.834276229" observedRunningTime="2025-11-25 18:26:19.307050412 +0000 UTC m=+809.692564027" watchObservedRunningTime="2025-11-25 18:26:19.309927141 +0000 UTC m=+809.695440746" Nov 25 18:26:23 crc kubenswrapper[4926]: I1125 18:26:23.544915 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:23 crc kubenswrapper[4926]: I1125 18:26:23.545574 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:23 crc kubenswrapper[4926]: I1125 18:26:23.561059 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 18:26:23 crc kubenswrapper[4926]: I1125 18:26:23.621873 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.372447 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.534332 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-b9mg6"] Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.537577 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.538344 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x"] Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.540399 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.540620 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-xbhks" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.540808 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.542191 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.545460 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.547399 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x"] Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.643348 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-zbmsj"] Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.644433 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.647420 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.647712 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-s4nxq" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.647850 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.650019 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-qccd5"] Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.651702 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.653924 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-qccd5"] Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.654038 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.661478 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.711115 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-startup\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.711566 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-conf\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.711696 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4611d03c-74a2-41ac-b29d-5629fce0b40c-cert\") pod \"frr-k8s-webhook-server-6998585d5-5xq6x\" (UID: \"4611d03c-74a2-41ac-b29d-5629fce0b40c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.711797 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7mjk\" (UniqueName: \"kubernetes.io/projected/4611d03c-74a2-41ac-b29d-5629fce0b40c-kube-api-access-r7mjk\") pod \"frr-k8s-webhook-server-6998585d5-5xq6x\" (UID: \"4611d03c-74a2-41ac-b29d-5629fce0b40c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.711933 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics-certs\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.712041 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j7qb\" (UniqueName: \"kubernetes.io/projected/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-kube-api-access-6j7qb\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.712128 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-sockets\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.712219 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.712325 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" 
(UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-reloader\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.813924 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814030 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/14e6944d-e69a-42b5-8645-e272346dd12d-metallb-excludel2\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-reloader\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814125 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-metrics-certs\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814198 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbhhd\" (UniqueName: \"kubernetes.io/projected/1d98e353-4142-4bcb-b9fb-489ebe6313be-kube-api-access-qbhhd\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814247 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-cert\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814292 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814323 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-startup\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814353 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-conf\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " 
pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814431 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4611d03c-74a2-41ac-b29d-5629fce0b40c-cert\") pod \"frr-k8s-webhook-server-6998585d5-5xq6x\" (UID: \"4611d03c-74a2-41ac-b29d-5629fce0b40c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814470 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7mjk\" (UniqueName: \"kubernetes.io/projected/4611d03c-74a2-41ac-b29d-5629fce0b40c-kube-api-access-r7mjk\") pod \"frr-k8s-webhook-server-6998585d5-5xq6x\" (UID: \"4611d03c-74a2-41ac-b29d-5629fce0b40c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814506 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-metrics-certs\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814537 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkcp6\" (UniqueName: \"kubernetes.io/projected/14e6944d-e69a-42b5-8645-e272346dd12d-kube-api-access-tkcp6\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814568 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics-certs\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814599 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j7qb\" (UniqueName: \"kubernetes.io/projected/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-kube-api-access-6j7qb\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.814635 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-sockets\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.815397 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-sockets\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.816350 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-reloader\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.816478 4926 secret.go:188] Couldn't get secret 
Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.816549 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics-certs podName:8b57b9fd-a2d0-46fe-bfc1-250c356b162b nodeName:}" failed. No retries permitted until 2025-11-25 18:26:25.316526851 +0000 UTC m=+815.702040456 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics-certs") pod "frr-k8s-b9mg6" (UID: "8b57b9fd-a2d0-46fe-bfc1-250c356b162b") : secret "frr-k8s-certs-secret" not found
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.816557 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.816664 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-conf\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.818156 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-frr-startup\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.826646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4611d03c-74a2-41ac-b29d-5629fce0b40c-cert\") pod \"frr-k8s-webhook-server-6998585d5-5xq6x\" (UID: \"4611d03c-74a2-41ac-b29d-5629fce0b40c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.852157 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j7qb\" (UniqueName: \"kubernetes.io/projected/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-kube-api-access-6j7qb\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.856132 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7mjk\" (UniqueName: \"kubernetes.io/projected/4611d03c-74a2-41ac-b29d-5629fce0b40c-kube-api-access-r7mjk\") pod \"frr-k8s-webhook-server-6998585d5-5xq6x\" (UID: \"4611d03c-74a2-41ac-b29d-5629fce0b40c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.898356 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rzl7m"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.898444 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rzl7m"
Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.900468 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x"
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916467 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/14e6944d-e69a-42b5-8645-e272346dd12d-metallb-excludel2\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916526 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-metrics-certs\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916564 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbhhd\" (UniqueName: \"kubernetes.io/projected/1d98e353-4142-4bcb-b9fb-489ebe6313be-kube-api-access-qbhhd\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916589 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-cert\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916610 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916642 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-metrics-certs\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.916658 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkcp6\" (UniqueName: \"kubernetes.io/projected/14e6944d-e69a-42b5-8645-e272346dd12d-kube-api-access-tkcp6\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.917258 4926 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.917311 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-metrics-certs podName:14e6944d-e69a-42b5-8645-e272346dd12d nodeName:}" failed. No retries permitted until 2025-11-25 18:26:25.417296982 +0000 UTC m=+815.802810587 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-metrics-certs") pod "speaker-zbmsj" (UID: "14e6944d-e69a-42b5-8645-e272346dd12d") : secret "speaker-certs-secret" not found Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.917262 4926 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.917570 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/14e6944d-e69a-42b5-8645-e272346dd12d-metallb-excludel2\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.917261 4926 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.917719 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist podName:14e6944d-e69a-42b5-8645-e272346dd12d nodeName:}" failed. No retries permitted until 2025-11-25 18:26:25.41757473 +0000 UTC m=+815.803088525 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist") pod "speaker-zbmsj" (UID: "14e6944d-e69a-42b5-8645-e272346dd12d") : secret "metallb-memberlist" not found Nov 25 18:26:24 crc kubenswrapper[4926]: E1125 18:26:24.917822 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-metrics-certs podName:1d98e353-4142-4bcb-b9fb-489ebe6313be nodeName:}" failed. No retries permitted until 2025-11-25 18:26:25.417805886 +0000 UTC m=+815.803319681 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-metrics-certs") pod "controller-6c7b4b5f48-qccd5" (UID: "1d98e353-4142-4bcb-b9fb-489ebe6313be") : secret "controller-certs-secret" not found Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.919786 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.933249 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-cert\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.935285 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkcp6\" (UniqueName: \"kubernetes.io/projected/14e6944d-e69a-42b5-8645-e272346dd12d-kube-api-access-tkcp6\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.942973 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbhhd\" (UniqueName: \"kubernetes.io/projected/1d98e353-4142-4bcb-b9fb-489ebe6313be-kube-api-access-qbhhd\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:24 crc kubenswrapper[4926]: I1125 18:26:24.972094 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.322419 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics-certs\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.344739 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8b57b9fd-a2d0-46fe-bfc1-250c356b162b-metrics-certs\") pod \"frr-k8s-b9mg6\" (UID: \"8b57b9fd-a2d0-46fe-bfc1-250c356b162b\") " pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.379291 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x"] Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.423311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.423399 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-metrics-certs\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.423459 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-metrics-certs\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:25 crc kubenswrapper[4926]: E1125 18:26:25.423550 4926 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 18:26:25 crc kubenswrapper[4926]: E1125 18:26:25.423722 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist podName:14e6944d-e69a-42b5-8645-e272346dd12d nodeName:}" failed. No retries permitted until 2025-11-25 18:26:26.423679107 +0000 UTC m=+816.809192712 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist") pod "speaker-zbmsj" (UID: "14e6944d-e69a-42b5-8645-e272346dd12d") : secret "metallb-memberlist" not found Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.429018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-metrics-certs\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.429021 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d98e353-4142-4bcb-b9fb-489ebe6313be-metrics-certs\") pod \"controller-6c7b4b5f48-qccd5\" (UID: \"1d98e353-4142-4bcb-b9fb-489ebe6313be\") " pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.431345 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.458445 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:25 crc kubenswrapper[4926]: I1125 18:26:25.574514 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.053046 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-qccd5"] Nov 25 18:26:26 crc kubenswrapper[4926]: W1125 18:26:26.071121 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d98e353_4142_4bcb_b9fb_489ebe6313be.slice/crio-9159e3393d64cb82981e42aa439ca4fffb7a6442a26e6d87ba7662b898c70cd9 WatchSource:0}: Error finding container 9159e3393d64cb82981e42aa439ca4fffb7a6442a26e6d87ba7662b898c70cd9: Status 404 returned error can't find the container with id 9159e3393d64cb82981e42aa439ca4fffb7a6442a26e6d87ba7662b898c70cd9 Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.350296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"54cfbf2cac604e6af6ab5341d39bd04f975fed815c804a51e0b91cb2c4190594"} Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.356446 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" event={"ID":"4611d03c-74a2-41ac-b29d-5629fce0b40c","Type":"ContainerStarted","Data":"4f6e169c186d5c0202fb95243a44430757ad3fde52c3d489faee15ccfd218270"} Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.361162 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-qccd5" event={"ID":"1d98e353-4142-4bcb-b9fb-489ebe6313be","Type":"ContainerStarted","Data":"8275a5d81ac65bdc114e59494f67a5520e29e1c913aa2d0bed0d709c909f588c"} Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.361210 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-qccd5" event={"ID":"1d98e353-4142-4bcb-b9fb-489ebe6313be","Type":"ContainerStarted","Data":"9159e3393d64cb82981e42aa439ca4fffb7a6442a26e6d87ba7662b898c70cd9"} Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.363648 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ks968"] Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.364199 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ks968" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="registry-server" containerID="cri-o://ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236" gracePeriod=2 Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.438475 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.445058 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/14e6944d-e69a-42b5-8645-e272346dd12d-memberlist\") pod \"speaker-zbmsj\" (UID: \"14e6944d-e69a-42b5-8645-e272346dd12d\") " pod="metallb-system/speaker-zbmsj" Nov 25 18:26:26 crc kubenswrapper[4926]: I1125 18:26:26.466080 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-zbmsj" Nov 25 18:26:27 crc kubenswrapper[4926]: I1125 18:26:27.381424 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-qccd5" event={"ID":"1d98e353-4142-4bcb-b9fb-489ebe6313be","Type":"ContainerStarted","Data":"0508b8f73230e86566de0689ab332d816d67b956497c89da92ba71d5359bc416"} Nov 25 18:26:27 crc kubenswrapper[4926]: I1125 18:26:27.381636 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:27 crc kubenswrapper[4926]: I1125 18:26:27.387246 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zbmsj" event={"ID":"14e6944d-e69a-42b5-8645-e272346dd12d","Type":"ContainerStarted","Data":"7a91a88f601a25baa6a32a06c7779c36abb7310e18e2b451fe3112d6e0aa02b2"} Nov 25 18:26:27 crc kubenswrapper[4926]: I1125 18:26:27.387298 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zbmsj" event={"ID":"14e6944d-e69a-42b5-8645-e272346dd12d","Type":"ContainerStarted","Data":"a9f140e57c6be5a7b020bd4e50839d5aa6db057f909f1a53e96816dc6bc59931"} Nov 25 18:26:27 crc kubenswrapper[4926]: I1125 18:26:27.405406 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-qccd5" podStartSLOduration=3.40538786 podStartE2EDuration="3.40538786s" podCreationTimestamp="2025-11-25 18:26:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:26:27.401972426 +0000 UTC m=+817.787486031" watchObservedRunningTime="2025-11-25 18:26:27.40538786 +0000 UTC m=+817.790901465" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.079565 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.199781 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-utilities\") pod \"5e60091b-d6b5-481a-82ab-36849df8e5fb\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.200298 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvjg5\" (UniqueName: \"kubernetes.io/projected/5e60091b-d6b5-481a-82ab-36849df8e5fb-kube-api-access-tvjg5\") pod \"5e60091b-d6b5-481a-82ab-36849df8e5fb\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.200512 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-catalog-content\") pod \"5e60091b-d6b5-481a-82ab-36849df8e5fb\" (UID: \"5e60091b-d6b5-481a-82ab-36849df8e5fb\") " Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.205143 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-utilities" (OuterVolumeSpecName: "utilities") pod "5e60091b-d6b5-481a-82ab-36849df8e5fb" (UID: "5e60091b-d6b5-481a-82ab-36849df8e5fb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.211601 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e60091b-d6b5-481a-82ab-36849df8e5fb-kube-api-access-tvjg5" (OuterVolumeSpecName: "kube-api-access-tvjg5") pod "5e60091b-d6b5-481a-82ab-36849df8e5fb" (UID: "5e60091b-d6b5-481a-82ab-36849df8e5fb"). InnerVolumeSpecName "kube-api-access-tvjg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.262963 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e60091b-d6b5-481a-82ab-36849df8e5fb" (UID: "5e60091b-d6b5-481a-82ab-36849df8e5fb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.302856 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvjg5\" (UniqueName: \"kubernetes.io/projected/5e60091b-d6b5-481a-82ab-36849df8e5fb-kube-api-access-tvjg5\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.302907 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.302923 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e60091b-d6b5-481a-82ab-36849df8e5fb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.400453 4926 generic.go:334] "Generic (PLEG): container finished" podID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerID="ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236" exitCode=0 Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.400509 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerDied","Data":"ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236"} Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.400560 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ks968" event={"ID":"5e60091b-d6b5-481a-82ab-36849df8e5fb","Type":"ContainerDied","Data":"85214d4412075805fde3509cfd3898a74d6c9400867d7c3ad1ff232e0411008e"} Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.400581 4926 scope.go:117] "RemoveContainer" containerID="ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.400751 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ks968" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.407563 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zbmsj" event={"ID":"14e6944d-e69a-42b5-8645-e272346dd12d","Type":"ContainerStarted","Data":"61e93ce893fd967fc3895699ba70aaf77d16a196bbadbb6e0b5ed977049995d3"} Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.407750 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-zbmsj" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.429494 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ks968"] Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.438761 4926 scope.go:117] "RemoveContainer" containerID="e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.439811 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ks968"] Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.457162 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-zbmsj" podStartSLOduration=4.457137619 podStartE2EDuration="4.457137619s" podCreationTimestamp="2025-11-25 18:26:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:26:28.447536876 +0000 UTC m=+818.833050481" watchObservedRunningTime="2025-11-25 18:26:28.457137619 +0000 UTC m=+818.842651224" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.471198 4926 scope.go:117] "RemoveContainer" containerID="fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.549140 4926 scope.go:117] "RemoveContainer" containerID="ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236" Nov 25 18:26:28 crc kubenswrapper[4926]: E1125 18:26:28.549832 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236\": container with ID starting with ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236 not found: ID does not exist" containerID="ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.549882 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236"} err="failed to get container status \"ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236\": rpc error: code = NotFound desc = could not find container \"ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236\": container with ID starting with ffea405880a57e77a61d5db8423b674f19a7fe80d63286030f07745331ed6236 not found: ID does not exist" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.549903 4926 scope.go:117] "RemoveContainer" containerID="e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6" Nov 25 18:26:28 crc kubenswrapper[4926]: E1125 18:26:28.550365 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6\": container with ID starting with 
e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6 not found: ID does not exist" containerID="e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.550404 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6"} err="failed to get container status \"e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6\": rpc error: code = NotFound desc = could not find container \"e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6\": container with ID starting with e1eee4f4170c4a4dc76597e6e7d231dbd0c1b245f35e92def91c211b7f5e19f6 not found: ID does not exist" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.550419 4926 scope.go:117] "RemoveContainer" containerID="fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab" Nov 25 18:26:28 crc kubenswrapper[4926]: E1125 18:26:28.554585 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab\": container with ID starting with fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab not found: ID does not exist" containerID="fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.554620 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab"} err="failed to get container status \"fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab\": rpc error: code = NotFound desc = could not find container \"fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab\": container with ID starting with fe8f221872f93b5b95992aaae1db27f0f1713a1b3992770e7c0faf61268037ab not found: ID does not exist" Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.562482 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rzl7m"] Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.562758 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rzl7m" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="registry-server" containerID="cri-o://0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c" gracePeriod=2 Nov 25 18:26:28 crc kubenswrapper[4926]: I1125 18:26:28.914210 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.012292 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-utilities\") pod \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.012496 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-catalog-content\") pod \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.012550 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x27zj\" (UniqueName: \"kubernetes.io/projected/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-kube-api-access-x27zj\") pod \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\" (UID: \"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2\") " Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.013301 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-utilities" (OuterVolumeSpecName: "utilities") pod "86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" (UID: "86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.028576 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" (UID: "86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.032870 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-kube-api-access-x27zj" (OuterVolumeSpecName: "kube-api-access-x27zj") pod "86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" (UID: "86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2"). InnerVolumeSpecName "kube-api-access-x27zj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.114337 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.114415 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x27zj\" (UniqueName: \"kubernetes.io/projected/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-kube-api-access-x27zj\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.114437 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.429425 4926 generic.go:334] "Generic (PLEG): container finished" podID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerID="0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c" exitCode=0 Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.430261 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rzl7m" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.442469 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rzl7m" event={"ID":"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2","Type":"ContainerDied","Data":"0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c"} Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.442562 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rzl7m" event={"ID":"86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2","Type":"ContainerDied","Data":"0e05074062dc1d158bb8efbe20d593c211b91c0ef1ebcf4a61aa23844be9befe"} Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.442589 4926 scope.go:117] "RemoveContainer" containerID="0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.470527 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rzl7m"] Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.474135 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rzl7m"] Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.489485 4926 scope.go:117] "RemoveContainer" containerID="d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.540427 4926 scope.go:117] "RemoveContainer" containerID="0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.556452 4926 scope.go:117] "RemoveContainer" containerID="0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c" Nov 25 18:26:29 crc kubenswrapper[4926]: E1125 18:26:29.557916 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c\": container with ID starting with 0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c not found: ID does not exist" containerID="0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.557953 4926 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c"} err="failed to get container status \"0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c\": rpc error: code = NotFound desc = could not find container \"0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c\": container with ID starting with 0435cc82e714ae852412d8d6920467a480c6582674a8bfa02db188aaae11ea7c not found: ID does not exist" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.557991 4926 scope.go:117] "RemoveContainer" containerID="d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4" Nov 25 18:26:29 crc kubenswrapper[4926]: E1125 18:26:29.558463 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4\": container with ID starting with d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4 not found: ID does not exist" containerID="d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.558532 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4"} err="failed to get container status \"d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4\": rpc error: code = NotFound desc = could not find container \"d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4\": container with ID starting with d50752e8265b4807e56196bdd9578d02155616c8a665008f5521eb5bdd9f26e4 not found: ID does not exist" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.558581 4926 scope.go:117] "RemoveContainer" containerID="0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d" Nov 25 18:26:29 crc kubenswrapper[4926]: E1125 18:26:29.559059 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d\": container with ID starting with 0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d not found: ID does not exist" containerID="0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d" Nov 25 18:26:29 crc kubenswrapper[4926]: I1125 18:26:29.559088 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d"} err="failed to get container status \"0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d\": rpc error: code = NotFound desc = could not find container \"0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d\": container with ID starting with 0899e7de471c13ed99be6cf0efc9bf6797a06ea707034f275762b15b09afef8d not found: ID does not exist" Nov 25 18:26:30 crc kubenswrapper[4926]: I1125 18:26:30.339776 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" path="/var/lib/kubelet/pods/5e60091b-d6b5-481a-82ab-36849df8e5fb/volumes" Nov 25 18:26:30 crc kubenswrapper[4926]: I1125 18:26:30.340553 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" path="/var/lib/kubelet/pods/86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2/volumes" Nov 25 18:26:34 crc kubenswrapper[4926]: I1125 
18:26:34.487226 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b57b9fd-a2d0-46fe-bfc1-250c356b162b" containerID="dc79cb9fe896af93bcdcd2944094c32deec4521278dca848091dbd250ab16e05" exitCode=0 Nov 25 18:26:34 crc kubenswrapper[4926]: I1125 18:26:34.487356 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerDied","Data":"dc79cb9fe896af93bcdcd2944094c32deec4521278dca848091dbd250ab16e05"} Nov 25 18:26:34 crc kubenswrapper[4926]: I1125 18:26:34.490585 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" event={"ID":"4611d03c-74a2-41ac-b29d-5629fce0b40c","Type":"ContainerStarted","Data":"c81d00d81636e3e7153b310c8e36e5fe70b0c36611ca6be7b1895eeb89372c19"} Nov 25 18:26:34 crc kubenswrapper[4926]: I1125 18:26:34.490781 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:35 crc kubenswrapper[4926]: I1125 18:26:35.503923 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b57b9fd-a2d0-46fe-bfc1-250c356b162b" containerID="c7e65c28319a887b05599d8b43e87beeabf41f7b3e47eaf7518449e06368a364" exitCode=0 Nov 25 18:26:35 crc kubenswrapper[4926]: I1125 18:26:35.504029 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerDied","Data":"c7e65c28319a887b05599d8b43e87beeabf41f7b3e47eaf7518449e06368a364"} Nov 25 18:26:35 crc kubenswrapper[4926]: I1125 18:26:35.550228 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" podStartSLOduration=3.547912796 podStartE2EDuration="11.550206411s" podCreationTimestamp="2025-11-25 18:26:24 +0000 UTC" firstStartedPulling="2025-11-25 18:26:25.404332218 +0000 UTC m=+815.789845823" lastFinishedPulling="2025-11-25 18:26:33.406625823 +0000 UTC m=+823.792139438" observedRunningTime="2025-11-25 18:26:34.590891544 +0000 UTC m=+824.976405149" watchObservedRunningTime="2025-11-25 18:26:35.550206411 +0000 UTC m=+825.935720016" Nov 25 18:26:36 crc kubenswrapper[4926]: I1125 18:26:36.471507 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-zbmsj" Nov 25 18:26:36 crc kubenswrapper[4926]: I1125 18:26:36.516844 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b57b9fd-a2d0-46fe-bfc1-250c356b162b" containerID="6a435dbc7d4be07fcec72b441ade2af92e1a5c4fc6302e0f84b2ac4af92c99fc" exitCode=0 Nov 25 18:26:36 crc kubenswrapper[4926]: I1125 18:26:36.516908 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerDied","Data":"6a435dbc7d4be07fcec72b441ade2af92e1a5c4fc6302e0f84b2ac4af92c99fc"} Nov 25 18:26:37 crc kubenswrapper[4926]: I1125 18:26:37.530192 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"094dbc0e8a68034f7e42bd5c036d24f852c58d67edf6d45bf63674ca9469fa8f"} Nov 25 18:26:37 crc kubenswrapper[4926]: I1125 18:26:37.530843 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" 
event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"5dcd920f6849742e3c1448e2cb484a0a99b03dc123f5ae0c72b7d6fb3279d2bb"} Nov 25 18:26:37 crc kubenswrapper[4926]: I1125 18:26:37.530863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"44dbc6a1e4ea4ab6cd37dbbf5c2af3fa2ec4d34fc9bce85d378c269b617ea63a"} Nov 25 18:26:37 crc kubenswrapper[4926]: I1125 18:26:37.530874 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"0e65034fa140163d40ed21f8269327e3cd6dc572bef922e3a0226ecacbd56280"} Nov 25 18:26:37 crc kubenswrapper[4926]: I1125 18:26:37.530888 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"9c7eadc91cc6c110ee950ef7557835884193a0e7e7c5cf421ce715727a8ad012"} Nov 25 18:26:38 crc kubenswrapper[4926]: I1125 18:26:38.544714 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-b9mg6" event={"ID":"8b57b9fd-a2d0-46fe-bfc1-250c356b162b","Type":"ContainerStarted","Data":"74ec94e110ebaea20e958643590cb9f05780c1213947dee256bfb00939920ec3"} Nov 25 18:26:38 crc kubenswrapper[4926]: I1125 18:26:38.546472 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:38 crc kubenswrapper[4926]: I1125 18:26:38.591571 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-b9mg6" podStartSLOduration=6.808967704 podStartE2EDuration="14.591548549s" podCreationTimestamp="2025-11-25 18:26:24 +0000 UTC" firstStartedPulling="2025-11-25 18:26:25.599406083 +0000 UTC m=+815.984919698" lastFinishedPulling="2025-11-25 18:26:33.381986938 +0000 UTC m=+823.767500543" observedRunningTime="2025-11-25 18:26:38.58209387 +0000 UTC m=+828.967607505" watchObservedRunningTime="2025-11-25 18:26:38.591548549 +0000 UTC m=+828.977062194" Nov 25 18:26:40 crc kubenswrapper[4926]: I1125 18:26:40.459546 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:40 crc kubenswrapper[4926]: I1125 18:26:40.510011 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.982994 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-5b4h5"] Nov 25 18:26:42 crc kubenswrapper[4926]: E1125 18:26:42.983790 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="extract-content" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.983808 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="extract-content" Nov 25 18:26:42 crc kubenswrapper[4926]: E1125 18:26:42.983831 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="registry-server" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.983838 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="registry-server" Nov 25 18:26:42 crc kubenswrapper[4926]: E1125 18:26:42.983863 4926 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="extract-content" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.983870 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="extract-content" Nov 25 18:26:42 crc kubenswrapper[4926]: E1125 18:26:42.983887 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="registry-server" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.983894 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="registry-server" Nov 25 18:26:42 crc kubenswrapper[4926]: E1125 18:26:42.983904 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="extract-utilities" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.983912 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="extract-utilities" Nov 25 18:26:42 crc kubenswrapper[4926]: E1125 18:26:42.983922 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="extract-utilities" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.983928 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="extract-utilities" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.984047 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="86979fa3-a5bd-4bd6-a1eb-1533a96ef1a2" containerName="registry-server" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.984068 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e60091b-d6b5-481a-82ab-36849df8e5fb" containerName="registry-server" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.984719 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.987214 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-v2szw" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.987881 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 18:26:42 crc kubenswrapper[4926]: I1125 18:26:42.988005 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 18:26:43 crc kubenswrapper[4926]: I1125 18:26:43.045612 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5b4h5"] Nov 25 18:26:43 crc kubenswrapper[4926]: I1125 18:26:43.064773 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxmbm\" (UniqueName: \"kubernetes.io/projected/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba-kube-api-access-mxmbm\") pod \"openstack-operator-index-5b4h5\" (UID: \"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba\") " pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:43 crc kubenswrapper[4926]: I1125 18:26:43.166724 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxmbm\" (UniqueName: \"kubernetes.io/projected/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba-kube-api-access-mxmbm\") pod \"openstack-operator-index-5b4h5\" (UID: \"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba\") " pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:43 crc kubenswrapper[4926]: I1125 18:26:43.192657 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxmbm\" (UniqueName: \"kubernetes.io/projected/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba-kube-api-access-mxmbm\") pod \"openstack-operator-index-5b4h5\" (UID: \"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba\") " pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:43 crc kubenswrapper[4926]: I1125 18:26:43.304489 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:43 crc kubenswrapper[4926]: I1125 18:26:43.706176 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5b4h5"] Nov 25 18:26:43 crc kubenswrapper[4926]: W1125 18:26:43.711607 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1815e71d_ae38_4d3f_a214_7a9c6ec0f9ba.slice/crio-b889f14c4b376e0b425a6db0c92f03ba813184dffac1c85a0abbd2b8e7138436 WatchSource:0}: Error finding container b889f14c4b376e0b425a6db0c92f03ba813184dffac1c85a0abbd2b8e7138436: Status 404 returned error can't find the container with id b889f14c4b376e0b425a6db0c92f03ba813184dffac1c85a0abbd2b8e7138436 Nov 25 18:26:44 crc kubenswrapper[4926]: I1125 18:26:44.592547 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5b4h5" event={"ID":"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba","Type":"ContainerStarted","Data":"b889f14c4b376e0b425a6db0c92f03ba813184dffac1c85a0abbd2b8e7138436"} Nov 25 18:26:44 crc kubenswrapper[4926]: I1125 18:26:44.910158 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5xq6x" Nov 25 18:26:45 crc kubenswrapper[4926]: I1125 18:26:45.581755 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-qccd5" Nov 25 18:26:46 crc kubenswrapper[4926]: I1125 18:26:46.619548 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5b4h5" event={"ID":"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba","Type":"ContainerStarted","Data":"55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781"} Nov 25 18:26:46 crc kubenswrapper[4926]: I1125 18:26:46.634244 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-5b4h5" podStartSLOduration=2.000081311 podStartE2EDuration="4.63421218s" podCreationTimestamp="2025-11-25 18:26:42 +0000 UTC" firstStartedPulling="2025-11-25 18:26:43.714416783 +0000 UTC m=+834.099930388" lastFinishedPulling="2025-11-25 18:26:46.348547642 +0000 UTC m=+836.734061257" observedRunningTime="2025-11-25 18:26:46.63240349 +0000 UTC m=+837.017917105" watchObservedRunningTime="2025-11-25 18:26:46.63421218 +0000 UTC m=+837.019725785" Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.168670 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-5b4h5"] Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.636195 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-5b4h5" podUID="1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" containerName="registry-server" containerID="cri-o://55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781" gracePeriod=2 Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.774064 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6xpmt"] Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.775633 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.783404 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6xpmt"] Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.870181 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp44m\" (UniqueName: \"kubernetes.io/projected/751df222-409a-4aaf-9558-2c777866237c-kube-api-access-jp44m\") pod \"openstack-operator-index-6xpmt\" (UID: \"751df222-409a-4aaf-9558-2c777866237c\") " pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:48 crc kubenswrapper[4926]: I1125 18:26:48.972945 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp44m\" (UniqueName: \"kubernetes.io/projected/751df222-409a-4aaf-9558-2c777866237c-kube-api-access-jp44m\") pod \"openstack-operator-index-6xpmt\" (UID: \"751df222-409a-4aaf-9558-2c777866237c\") " pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.001161 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp44m\" (UniqueName: \"kubernetes.io/projected/751df222-409a-4aaf-9558-2c777866237c-kube-api-access-jp44m\") pod \"openstack-operator-index-6xpmt\" (UID: \"751df222-409a-4aaf-9558-2c777866237c\") " pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.115480 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.585000 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.593308 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6xpmt"] Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.651758 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6xpmt" event={"ID":"751df222-409a-4aaf-9558-2c777866237c","Type":"ContainerStarted","Data":"1271e391e77abcbbd7984a889b945234379aedffaea9be0364c341f15c9373bf"} Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.654633 4926 generic.go:334] "Generic (PLEG): container finished" podID="1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" containerID="55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781" exitCode=0 Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.654689 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5b4h5" event={"ID":"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba","Type":"ContainerDied","Data":"55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781"} Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.654748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5b4h5" event={"ID":"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba","Type":"ContainerDied","Data":"b889f14c4b376e0b425a6db0c92f03ba813184dffac1c85a0abbd2b8e7138436"} Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.654784 4926 scope.go:117] "RemoveContainer" containerID="55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.654691 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5b4h5" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.683701 4926 scope.go:117] "RemoveContainer" containerID="55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.684635 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxmbm\" (UniqueName: \"kubernetes.io/projected/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba-kube-api-access-mxmbm\") pod \"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba\" (UID: \"1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba\") " Nov 25 18:26:49 crc kubenswrapper[4926]: E1125 18:26:49.685031 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781\": container with ID starting with 55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781 not found: ID does not exist" containerID="55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.685067 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781"} err="failed to get container status \"55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781\": rpc error: code = NotFound desc = could not find container \"55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781\": container with ID starting with 55141684effb98c9bea4db7b41a4874a362e5f1ce79305842121ce3f97b58781 not found: ID does not exist" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.691109 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba-kube-api-access-mxmbm" (OuterVolumeSpecName: "kube-api-access-mxmbm") pod "1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" (UID: "1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba"). InnerVolumeSpecName "kube-api-access-mxmbm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.786693 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxmbm\" (UniqueName: \"kubernetes.io/projected/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba-kube-api-access-mxmbm\") on node \"crc\" DevicePath \"\"" Nov 25 18:26:49 crc kubenswrapper[4926]: I1125 18:26:49.994855 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-5b4h5"] Nov 25 18:26:50 crc kubenswrapper[4926]: I1125 18:26:50.013800 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-5b4h5"] Nov 25 18:26:50 crc kubenswrapper[4926]: I1125 18:26:50.349795 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" path="/var/lib/kubelet/pods/1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba/volumes" Nov 25 18:26:50 crc kubenswrapper[4926]: I1125 18:26:50.672292 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6xpmt" event={"ID":"751df222-409a-4aaf-9558-2c777866237c","Type":"ContainerStarted","Data":"3496d98f08cda9598c840190db035e0d52d38d4f30cdcdd441229b930049cc5b"} Nov 25 18:26:50 crc kubenswrapper[4926]: I1125 18:26:50.700560 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6xpmt" podStartSLOduration=2.633910147 podStartE2EDuration="2.700518652s" podCreationTimestamp="2025-11-25 18:26:48 +0000 UTC" firstStartedPulling="2025-11-25 18:26:49.629537026 +0000 UTC m=+840.015050641" lastFinishedPulling="2025-11-25 18:26:49.696145541 +0000 UTC m=+840.081659146" observedRunningTime="2025-11-25 18:26:50.695332211 +0000 UTC m=+841.080845836" watchObservedRunningTime="2025-11-25 18:26:50.700518652 +0000 UTC m=+841.086032257" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.576391 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pcmm7"] Nov 25 18:26:53 crc kubenswrapper[4926]: E1125 18:26:53.577785 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" containerName="registry-server" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.577803 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" containerName="registry-server" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.577928 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1815e71d-ae38-4d3f-a214-7a9c6ec0f9ba" containerName="registry-server" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.578902 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.593612 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pcmm7"] Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.748652 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-utilities\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.748839 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-catalog-content\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.748899 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szl74\" (UniqueName: \"kubernetes.io/projected/b675acfd-2f67-43da-9c97-ade8c4a33274-kube-api-access-szl74\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.850985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-catalog-content\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.852297 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-catalog-content\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.852567 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szl74\" (UniqueName: \"kubernetes.io/projected/b675acfd-2f67-43da-9c97-ade8c4a33274-kube-api-access-szl74\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.853417 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-utilities\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.854060 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-utilities\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.879094 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-szl74\" (UniqueName: \"kubernetes.io/projected/b675acfd-2f67-43da-9c97-ade8c4a33274-kube-api-access-szl74\") pod \"certified-operators-pcmm7\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:53 crc kubenswrapper[4926]: I1125 18:26:53.896740 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:26:54 crc kubenswrapper[4926]: W1125 18:26:54.360480 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb675acfd_2f67_43da_9c97_ade8c4a33274.slice/crio-ea8a3a4c6c7a6187aba18631dda42abafce915671e1f757cb1fb97dda6f9556d WatchSource:0}: Error finding container ea8a3a4c6c7a6187aba18631dda42abafce915671e1f757cb1fb97dda6f9556d: Status 404 returned error can't find the container with id ea8a3a4c6c7a6187aba18631dda42abafce915671e1f757cb1fb97dda6f9556d Nov 25 18:26:54 crc kubenswrapper[4926]: I1125 18:26:54.363940 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pcmm7"] Nov 25 18:26:54 crc kubenswrapper[4926]: I1125 18:26:54.714722 4926 generic.go:334] "Generic (PLEG): container finished" podID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerID="4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649" exitCode=0 Nov 25 18:26:54 crc kubenswrapper[4926]: I1125 18:26:54.714794 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerDied","Data":"4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649"} Nov 25 18:26:54 crc kubenswrapper[4926]: I1125 18:26:54.714838 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerStarted","Data":"ea8a3a4c6c7a6187aba18631dda42abafce915671e1f757cb1fb97dda6f9556d"} Nov 25 18:26:55 crc kubenswrapper[4926]: I1125 18:26:55.465748 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-b9mg6" Nov 25 18:26:55 crc kubenswrapper[4926]: I1125 18:26:55.725835 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerStarted","Data":"78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a"} Nov 25 18:26:56 crc kubenswrapper[4926]: I1125 18:26:56.741438 4926 generic.go:334] "Generic (PLEG): container finished" podID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerID="78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a" exitCode=0 Nov 25 18:26:56 crc kubenswrapper[4926]: I1125 18:26:56.741594 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerDied","Data":"78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a"} Nov 25 18:26:57 crc kubenswrapper[4926]: I1125 18:26:57.755280 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerStarted","Data":"5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef"} Nov 25 18:26:57 crc kubenswrapper[4926]: I1125 
18:26:57.785249 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pcmm7" podStartSLOduration=2.350814074 podStartE2EDuration="4.785220287s" podCreationTimestamp="2025-11-25 18:26:53 +0000 UTC" firstStartedPulling="2025-11-25 18:26:54.717448788 +0000 UTC m=+845.102962393" lastFinishedPulling="2025-11-25 18:26:57.151855001 +0000 UTC m=+847.537368606" observedRunningTime="2025-11-25 18:26:57.780061595 +0000 UTC m=+848.165575220" watchObservedRunningTime="2025-11-25 18:26:57.785220287 +0000 UTC m=+848.170733892" Nov 25 18:26:59 crc kubenswrapper[4926]: I1125 18:26:59.116634 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:59 crc kubenswrapper[4926]: I1125 18:26:59.117102 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:59 crc kubenswrapper[4926]: I1125 18:26:59.151595 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:26:59 crc kubenswrapper[4926]: I1125 18:26:59.816199 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6xpmt" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.414808 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw"] Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.416615 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.425276 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-7ktkz" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.432534 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw"] Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.591176 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-util\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.591467 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-bundle\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.591559 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9jxf\" (UniqueName: \"kubernetes.io/projected/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-kube-api-access-q9jxf\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " 
pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.693205 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-util\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.693333 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-bundle\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.693410 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9jxf\" (UniqueName: \"kubernetes.io/projected/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-kube-api-access-q9jxf\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.693784 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-util\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.693976 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-bundle\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.717045 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9jxf\" (UniqueName: \"kubernetes.io/projected/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-kube-api-access-q9jxf\") pod \"72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:01 crc kubenswrapper[4926]: I1125 18:27:01.742494 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:02 crc kubenswrapper[4926]: I1125 18:27:02.275421 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw"] Nov 25 18:27:02 crc kubenswrapper[4926]: I1125 18:27:02.798746 4926 generic.go:334] "Generic (PLEG): container finished" podID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerID="786e79e460360bc7f82560be55d966e4ff64793d3bd907cab5e6111b0bfe6abc" exitCode=0 Nov 25 18:27:02 crc kubenswrapper[4926]: I1125 18:27:02.798843 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" event={"ID":"7a85c324-96e2-42e0-9c20-8dfcd6d203d4","Type":"ContainerDied","Data":"786e79e460360bc7f82560be55d966e4ff64793d3bd907cab5e6111b0bfe6abc"} Nov 25 18:27:02 crc kubenswrapper[4926]: I1125 18:27:02.799204 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" event={"ID":"7a85c324-96e2-42e0-9c20-8dfcd6d203d4","Type":"ContainerStarted","Data":"6680b917a2266018e47485598d6a357878bc34f66376216e55f74e7e9be0f6bd"} Nov 25 18:27:03 crc kubenswrapper[4926]: I1125 18:27:03.817970 4926 generic.go:334] "Generic (PLEG): container finished" podID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerID="3b66e8d0c62ab1feba8ded7f79ca4d6947f711990f18d7a5883db7349b44f253" exitCode=0 Nov 25 18:27:03 crc kubenswrapper[4926]: I1125 18:27:03.818101 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" event={"ID":"7a85c324-96e2-42e0-9c20-8dfcd6d203d4","Type":"ContainerDied","Data":"3b66e8d0c62ab1feba8ded7f79ca4d6947f711990f18d7a5883db7349b44f253"} Nov 25 18:27:03 crc kubenswrapper[4926]: I1125 18:27:03.897720 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:27:03 crc kubenswrapper[4926]: I1125 18:27:03.897784 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:27:04 crc kubenswrapper[4926]: I1125 18:27:04.000963 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:27:04 crc kubenswrapper[4926]: I1125 18:27:04.831425 4926 generic.go:334] "Generic (PLEG): container finished" podID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerID="9609dacadd7d30ff19c1fd04a696977787f5898c559d3c1b30c0e605858a1e4f" exitCode=0 Nov 25 18:27:04 crc kubenswrapper[4926]: I1125 18:27:04.831555 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" event={"ID":"7a85c324-96e2-42e0-9c20-8dfcd6d203d4","Type":"ContainerDied","Data":"9609dacadd7d30ff19c1fd04a696977787f5898c559d3c1b30c0e605858a1e4f"} Nov 25 18:27:04 crc kubenswrapper[4926]: I1125 18:27:04.883258 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.220650 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.362669 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-util\") pod \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.362806 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-bundle\") pod \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.362876 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9jxf\" (UniqueName: \"kubernetes.io/projected/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-kube-api-access-q9jxf\") pod \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\" (UID: \"7a85c324-96e2-42e0-9c20-8dfcd6d203d4\") " Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.363750 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-bundle" (OuterVolumeSpecName: "bundle") pod "7a85c324-96e2-42e0-9c20-8dfcd6d203d4" (UID: "7a85c324-96e2-42e0-9c20-8dfcd6d203d4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.374259 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-kube-api-access-q9jxf" (OuterVolumeSpecName: "kube-api-access-q9jxf") pod "7a85c324-96e2-42e0-9c20-8dfcd6d203d4" (UID: "7a85c324-96e2-42e0-9c20-8dfcd6d203d4"). InnerVolumeSpecName "kube-api-access-q9jxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.392041 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-util" (OuterVolumeSpecName: "util") pod "7a85c324-96e2-42e0-9c20-8dfcd6d203d4" (UID: "7a85c324-96e2-42e0-9c20-8dfcd6d203d4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.465182 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-util\") on node \"crc\" DevicePath \"\"" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.465250 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.465279 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9jxf\" (UniqueName: \"kubernetes.io/projected/7a85c324-96e2-42e0-9c20-8dfcd6d203d4-kube-api-access-q9jxf\") on node \"crc\" DevicePath \"\"" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.763509 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pcmm7"] Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.852455 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" event={"ID":"7a85c324-96e2-42e0-9c20-8dfcd6d203d4","Type":"ContainerDied","Data":"6680b917a2266018e47485598d6a357878bc34f66376216e55f74e7e9be0f6bd"} Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.852502 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.852514 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6680b917a2266018e47485598d6a357878bc34f66376216e55f74e7e9be0f6bd" Nov 25 18:27:06 crc kubenswrapper[4926]: I1125 18:27:06.852677 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pcmm7" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="registry-server" containerID="cri-o://5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef" gracePeriod=2 Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.346236 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.502711 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-utilities\") pod \"b675acfd-2f67-43da-9c97-ade8c4a33274\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.502889 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-catalog-content\") pod \"b675acfd-2f67-43da-9c97-ade8c4a33274\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.502921 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szl74\" (UniqueName: \"kubernetes.io/projected/b675acfd-2f67-43da-9c97-ade8c4a33274-kube-api-access-szl74\") pod \"b675acfd-2f67-43da-9c97-ade8c4a33274\" (UID: \"b675acfd-2f67-43da-9c97-ade8c4a33274\") " Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.503822 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-utilities" (OuterVolumeSpecName: "utilities") pod "b675acfd-2f67-43da-9c97-ade8c4a33274" (UID: "b675acfd-2f67-43da-9c97-ade8c4a33274"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.519697 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b675acfd-2f67-43da-9c97-ade8c4a33274-kube-api-access-szl74" (OuterVolumeSpecName: "kube-api-access-szl74") pod "b675acfd-2f67-43da-9c97-ade8c4a33274" (UID: "b675acfd-2f67-43da-9c97-ade8c4a33274"). InnerVolumeSpecName "kube-api-access-szl74". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.553280 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b675acfd-2f67-43da-9c97-ade8c4a33274" (UID: "b675acfd-2f67-43da-9c97-ade8c4a33274"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.605055 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.605106 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szl74\" (UniqueName: \"kubernetes.io/projected/b675acfd-2f67-43da-9c97-ade8c4a33274-kube-api-access-szl74\") on node \"crc\" DevicePath \"\"" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.605121 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b675acfd-2f67-43da-9c97-ade8c4a33274-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.862945 4926 generic.go:334] "Generic (PLEG): container finished" podID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerID="5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef" exitCode=0 Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.862988 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerDied","Data":"5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef"} Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.863015 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcmm7" event={"ID":"b675acfd-2f67-43da-9c97-ade8c4a33274","Type":"ContainerDied","Data":"ea8a3a4c6c7a6187aba18631dda42abafce915671e1f757cb1fb97dda6f9556d"} Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.863034 4926 scope.go:117] "RemoveContainer" containerID="5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.863143 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pcmm7" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.895863 4926 scope.go:117] "RemoveContainer" containerID="78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.896533 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pcmm7"] Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.899859 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pcmm7"] Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.925024 4926 scope.go:117] "RemoveContainer" containerID="4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.944742 4926 scope.go:117] "RemoveContainer" containerID="5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef" Nov 25 18:27:07 crc kubenswrapper[4926]: E1125 18:27:07.945470 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef\": container with ID starting with 5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef not found: ID does not exist" containerID="5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.945504 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef"} err="failed to get container status \"5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef\": rpc error: code = NotFound desc = could not find container \"5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef\": container with ID starting with 5e58fc2436e3f1301b312f1eae15f66814deac1ee704206991bf70337b8193ef not found: ID does not exist" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.945528 4926 scope.go:117] "RemoveContainer" containerID="78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a" Nov 25 18:27:07 crc kubenswrapper[4926]: E1125 18:27:07.945992 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a\": container with ID starting with 78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a not found: ID does not exist" containerID="78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.946111 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a"} err="failed to get container status \"78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a\": rpc error: code = NotFound desc = could not find container \"78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a\": container with ID starting with 78c064a65474faf90838ee160e10854692995192379c34c76474563f3735890a not found: ID does not exist" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.946208 4926 scope.go:117] "RemoveContainer" containerID="4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649" Nov 25 18:27:07 crc kubenswrapper[4926]: E1125 18:27:07.946594 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649\": container with ID starting with 4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649 not found: ID does not exist" containerID="4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649" Nov 25 18:27:07 crc kubenswrapper[4926]: I1125 18:27:07.946616 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649"} err="failed to get container status \"4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649\": rpc error: code = NotFound desc = could not find container \"4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649\": container with ID starting with 4fbe70a3d198155cde95caed07aa3baf8f6e4bd62319847ae2703081fb642649 not found: ID does not exist" Nov 25 18:27:08 crc kubenswrapper[4926]: I1125 18:27:08.340863 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" path="/var/lib/kubelet/pods/b675acfd-2f67-43da-9c97-ade8c4a33274/volumes" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.663510 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw"] Nov 25 18:27:11 crc kubenswrapper[4926]: E1125 18:27:11.664521 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="pull" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664542 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="pull" Nov 25 18:27:11 crc kubenswrapper[4926]: E1125 18:27:11.664555 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="extract-utilities" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664564 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="extract-utilities" Nov 25 18:27:11 crc kubenswrapper[4926]: E1125 18:27:11.664575 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="registry-server" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664586 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="registry-server" Nov 25 18:27:11 crc kubenswrapper[4926]: E1125 18:27:11.664607 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="extract" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664616 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="extract" Nov 25 18:27:11 crc kubenswrapper[4926]: E1125 18:27:11.664632 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="util" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664639 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="util" Nov 25 18:27:11 crc kubenswrapper[4926]: E1125 18:27:11.664663 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="extract-content" Nov 25 18:27:11 crc 
kubenswrapper[4926]: I1125 18:27:11.664670 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="extract-content" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664840 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b675acfd-2f67-43da-9c97-ade8c4a33274" containerName="registry-server" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.664866 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a85c324-96e2-42e0-9c20-8dfcd6d203d4" containerName="extract" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.665547 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.669668 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-9xmqj" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.690538 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw"] Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.779269 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mklfv\" (UniqueName: \"kubernetes.io/projected/50b0b29f-bc51-4109-88ce-84d3223fc78e-kube-api-access-mklfv\") pod \"openstack-operator-controller-operator-7d958449d8-hxqgw\" (UID: \"50b0b29f-bc51-4109-88ce-84d3223fc78e\") " pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.881352 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mklfv\" (UniqueName: \"kubernetes.io/projected/50b0b29f-bc51-4109-88ce-84d3223fc78e-kube-api-access-mklfv\") pod \"openstack-operator-controller-operator-7d958449d8-hxqgw\" (UID: \"50b0b29f-bc51-4109-88ce-84d3223fc78e\") " pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.903865 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mklfv\" (UniqueName: \"kubernetes.io/projected/50b0b29f-bc51-4109-88ce-84d3223fc78e-kube-api-access-mklfv\") pod \"openstack-operator-controller-operator-7d958449d8-hxqgw\" (UID: \"50b0b29f-bc51-4109-88ce-84d3223fc78e\") " pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:27:11 crc kubenswrapper[4926]: I1125 18:27:11.995849 4926 util.go:30] "No sandbox for pod can be found. 
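The cpu_manager/memory_manager burst above runs at pod admission: both managers sweep their checkpointed per-container assignments and discard entries for pods that no longer exist, and the E-level "RemoveStaleState: removing container" lines are that sweep finding leftovers from the pods torn down earlier. A compact sketch of the sweep, with invented types:

```go
package main

import "fmt"

// assignments maps podUID -> containerName -> some pinned resource (a CPU set here).
type assignments map[string]map[string]string

// removeStaleState drops state for any pod that is no longer active,
// mirroring what cpu_manager/memory_manager log during pod admission.
func removeStaleState(a assignments, activePods map[string]bool) {
	for podUID, containers := range a {
		if activePods[podUID] {
			continue
		}
		for name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
		}
		delete(a, podUID) // deleting during range is safe in Go
	}
}

func main() {
	a := assignments{
		"b675acfd-2f67-43da-9c97-ade8c4a33274": {"registry-server": "2-3"},
		"live-pod":                             {"app": "0-1"},
	}
	removeStaleState(a, map[string]bool{"live-pod": true})
	fmt.Println(a) // only live-pod remains
}
```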
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:27:12 crc kubenswrapper[4926]: I1125 18:27:12.548059 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw"] Nov 25 18:27:12 crc kubenswrapper[4926]: I1125 18:27:12.917583 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" event={"ID":"50b0b29f-bc51-4109-88ce-84d3223fc78e","Type":"ContainerStarted","Data":"adb280e2dbc7affbf1265b10954afa83265d7b1d3d782cb33c6936f6fed8955c"} Nov 25 18:27:16 crc kubenswrapper[4926]: I1125 18:27:16.949315 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" event={"ID":"50b0b29f-bc51-4109-88ce-84d3223fc78e","Type":"ContainerStarted","Data":"0c82d61551d4c6b2303a001e4a667b7bf3dbfb6079a898be3a217aaaaf186484"} Nov 25 18:27:16 crc kubenswrapper[4926]: I1125 18:27:16.950220 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:27:17 crc kubenswrapper[4926]: I1125 18:27:17.000769 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" podStartSLOduration=1.984820896 podStartE2EDuration="6.000748148s" podCreationTimestamp="2025-11-25 18:27:11 +0000 UTC" firstStartedPulling="2025-11-25 18:27:12.562624926 +0000 UTC m=+862.948138531" lastFinishedPulling="2025-11-25 18:27:16.578552188 +0000 UTC m=+866.964065783" observedRunningTime="2025-11-25 18:27:16.996862519 +0000 UTC m=+867.382376124" watchObservedRunningTime="2025-11-25 18:27:17.000748148 +0000 UTC m=+867.386261763" Nov 25 18:27:22 crc kubenswrapper[4926]: I1125 18:27:21.999450 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.703444 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.707455 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.709114 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.710410 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.711169 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-r6pjt" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.711807 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-zg5ps" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.735272 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.749919 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-bpsp8"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.751768 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.765036 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-8sqwv" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.770437 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.774487 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-bpsp8"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.780269 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.781596 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.790840 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-zx2dc" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.804454 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.810741 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpgjq\" (UniqueName: \"kubernetes.io/projected/e949ca02-dbd2-4361-8b44-a498d1ec4c13-kube-api-access-cpgjq\") pod \"designate-operator-controller-manager-955677c94-bpsp8\" (UID: \"e949ca02-dbd2-4361-8b44-a498d1ec4c13\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.811053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgg7m\" (UniqueName: \"kubernetes.io/projected/c8395389-762a-497d-972e-0987350a9a00-kube-api-access-zgg7m\") pod \"barbican-operator-controller-manager-7b64f4fb85-hhl9b\" (UID: \"c8395389-762a-497d-972e-0987350a9a00\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.811184 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m5kl\" (UniqueName: \"kubernetes.io/projected/421c1930-795c-4e93-9865-bff40d49ddf5-kube-api-access-7m5kl\") pod \"cinder-operator-controller-manager-6b7f75547b-dxhsp\" (UID: \"421c1930-795c-4e93-9865-bff40d49ddf5\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.811181 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.819295 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.820526 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.824936 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-g6hs9" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.825856 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fhcgp" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.845345 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.855875 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.874119 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.894830 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.896336 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.899250 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-4dgcp" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.899732 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.904655 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl"] Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.906332 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.909589 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-8vc99" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.912570 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m5kl\" (UniqueName: \"kubernetes.io/projected/421c1930-795c-4e93-9865-bff40d49ddf5-kube-api-access-7m5kl\") pod \"cinder-operator-controller-manager-6b7f75547b-dxhsp\" (UID: \"421c1930-795c-4e93-9865-bff40d49ddf5\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.913494 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5b9h\" (UniqueName: \"kubernetes.io/projected/c8322c05-5b96-4489-87a7-1677f90df80c-kube-api-access-g5b9h\") pod \"glance-operator-controller-manager-589cbd6b5b-8w6rx\" (UID: \"c8322c05-5b96-4489-87a7-1677f90df80c\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.913618 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpgjq\" (UniqueName: \"kubernetes.io/projected/e949ca02-dbd2-4361-8b44-a498d1ec4c13-kube-api-access-cpgjq\") pod \"designate-operator-controller-manager-955677c94-bpsp8\" (UID: \"e949ca02-dbd2-4361-8b44-a498d1ec4c13\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.913721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmc5z\" (UniqueName: \"kubernetes.io/projected/1df300a3-1d64-4e46-a0b5-9fe0bf029321-kube-api-access-wmc5z\") pod \"horizon-operator-controller-manager-5d494799bf-h55p4\" (UID: \"1df300a3-1d64-4e46-a0b5-9fe0bf029321\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.913825 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vg6r\" (UniqueName: \"kubernetes.io/projected/3b14286a-e339-4bd3-835c-67287c341869-kube-api-access-5vg6r\") pod \"heat-operator-controller-manager-5b77f656f-5q59t\" (UID: \"3b14286a-e339-4bd3-835c-67287c341869\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.913914 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgg7m\" (UniqueName: \"kubernetes.io/projected/c8395389-762a-497d-972e-0987350a9a00-kube-api-access-zgg7m\") pod \"barbican-operator-controller-manager-7b64f4fb85-hhl9b\" (UID: \"c8395389-762a-497d-972e-0987350a9a00\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:00 crc kubenswrapper[4926]: I1125 18:28:00.957649 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.002539 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m5kl\" (UniqueName: 
\"kubernetes.io/projected/421c1930-795c-4e93-9865-bff40d49ddf5-kube-api-access-7m5kl\") pod \"cinder-operator-controller-manager-6b7f75547b-dxhsp\" (UID: \"421c1930-795c-4e93-9865-bff40d49ddf5\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.025608 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5b9h\" (UniqueName: \"kubernetes.io/projected/c8322c05-5b96-4489-87a7-1677f90df80c-kube-api-access-g5b9h\") pod \"glance-operator-controller-manager-589cbd6b5b-8w6rx\" (UID: \"c8322c05-5b96-4489-87a7-1677f90df80c\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.025698 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmc5z\" (UniqueName: \"kubernetes.io/projected/1df300a3-1d64-4e46-a0b5-9fe0bf029321-kube-api-access-wmc5z\") pod \"horizon-operator-controller-manager-5d494799bf-h55p4\" (UID: \"1df300a3-1d64-4e46-a0b5-9fe0bf029321\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.025738 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vg6r\" (UniqueName: \"kubernetes.io/projected/3b14286a-e339-4bd3-835c-67287c341869-kube-api-access-5vg6r\") pod \"heat-operator-controller-manager-5b77f656f-5q59t\" (UID: \"3b14286a-e339-4bd3-835c-67287c341869\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.010468 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpgjq\" (UniqueName: \"kubernetes.io/projected/e949ca02-dbd2-4361-8b44-a498d1ec4c13-kube-api-access-cpgjq\") pod \"designate-operator-controller-manager-955677c94-bpsp8\" (UID: \"e949ca02-dbd2-4361-8b44-a498d1ec4c13\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.039834 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgg7m\" (UniqueName: \"kubernetes.io/projected/c8395389-762a-497d-972e-0987350a9a00-kube-api-access-zgg7m\") pod \"barbican-operator-controller-manager-7b64f4fb85-hhl9b\" (UID: \"c8395389-762a-497d-972e-0987350a9a00\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.040422 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.041660 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.045014 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.045065 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-98qn6" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.045790 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmc5z\" (UniqueName: \"kubernetes.io/projected/1df300a3-1d64-4e46-a0b5-9fe0bf029321-kube-api-access-wmc5z\") pod \"horizon-operator-controller-manager-5d494799bf-h55p4\" (UID: \"1df300a3-1d64-4e46-a0b5-9fe0bf029321\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.053043 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.056871 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.074734 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5b9h\" (UniqueName: \"kubernetes.io/projected/c8322c05-5b96-4489-87a7-1677f90df80c-kube-api-access-g5b9h\") pod \"glance-operator-controller-manager-589cbd6b5b-8w6rx\" (UID: \"c8322c05-5b96-4489-87a7-1677f90df80c\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.089423 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.091553 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.101221 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vg6r\" (UniqueName: \"kubernetes.io/projected/3b14286a-e339-4bd3-835c-67287c341869-kube-api-access-5vg6r\") pod \"heat-operator-controller-manager-5b77f656f-5q59t\" (UID: \"3b14286a-e339-4bd3-835c-67287c341869\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.125963 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.126995 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl4dz\" (UniqueName: \"kubernetes.io/projected/b70bd0b1-5555-49f4-ae5f-dfeebd005029-kube-api-access-jl4dz\") pod \"ironic-operator-controller-manager-67cb4dc6d4-89dkl\" (UID: \"b70bd0b1-5555-49f4-ae5f-dfeebd005029\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.127020 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.127069 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr6r6\" (UniqueName: \"kubernetes.io/projected/c67a3051-deee-4c35-b2fd-73f0f96ccbac-kube-api-access-jr6r6\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.145780 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.147065 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.156328 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-zt2sz" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.170693 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.171803 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.173847 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.177497 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-wrs5l" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.179374 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.191308 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.192674 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.196773 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-8btdl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.200588 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.204756 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.213465 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.228577 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr6r6\" (UniqueName: \"kubernetes.io/projected/c67a3051-deee-4c35-b2fd-73f0f96ccbac-kube-api-access-jr6r6\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.228676 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl4dz\" (UniqueName: \"kubernetes.io/projected/b70bd0b1-5555-49f4-ae5f-dfeebd005029-kube-api-access-jl4dz\") pod \"ironic-operator-controller-manager-67cb4dc6d4-89dkl\" (UID: \"b70bd0b1-5555-49f4-ae5f-dfeebd005029\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.228705 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.228741 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtht6\" (UniqueName: \"kubernetes.io/projected/9dfe0bac-7a60-47c9-bef9-e34a75d23521-kube-api-access-xtht6\") pod \"keystone-operator-controller-manager-7b4567c7cf-gr8fl\" (UID: \"9dfe0bac-7a60-47c9-bef9-e34a75d23521\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.229305 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.229373 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert podName:c67a3051-deee-4c35-b2fd-73f0f96ccbac nodeName:}" failed. No retries permitted until 2025-11-25 18:28:01.72934594 +0000 UTC m=+912.114859545 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert") pod "infra-operator-controller-manager-57548d458d-l4vqr" (UID: "c67a3051-deee-4c35-b2fd-73f0f96ccbac") : secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.238572 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.239923 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.246560 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-2xr6z" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.252121 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.264658 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr6r6\" (UniqueName: \"kubernetes.io/projected/c67a3051-deee-4c35-b2fd-73f0f96ccbac-kube-api-access-jr6r6\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.278361 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.283207 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl4dz\" (UniqueName: \"kubernetes.io/projected/b70bd0b1-5555-49f4-ae5f-dfeebd005029-kube-api-access-jl4dz\") pod \"ironic-operator-controller-manager-67cb4dc6d4-89dkl\" (UID: \"b70bd0b1-5555-49f4-ae5f-dfeebd005029\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.285964 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.286081 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.289996 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.299313 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-qxv8d" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.302762 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.304016 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.307801 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.315048 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.319442 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.322789 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.327839 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.331309 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-bn9q5" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.332912 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqzjx\" (UniqueName: \"kubernetes.io/projected/b4c6b194-9a8e-4cdb-a0e0-e67dce03328f-kube-api-access-lqzjx\") pod \"neutron-operator-controller-manager-6fdcddb789-rslqc\" (UID: \"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.333012 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdz9b\" (UniqueName: \"kubernetes.io/projected/c613eed5-f72e-4b4d-8283-5aa4e6241157-kube-api-access-cdz9b\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-5nvnv\" (UID: \"c613eed5-f72e-4b4d-8283-5aa4e6241157\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.333113 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsdl4\" (UniqueName: \"kubernetes.io/projected/5859a238-ed77-4ef7-ac69-295bd1c875c3-kube-api-access-bsdl4\") pod \"manila-operator-controller-manager-5d499bf58b-44shk\" (UID: \"5859a238-ed77-4ef7-ac69-295bd1c875c3\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.333168 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-jz7bq" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.333257 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtht6\" (UniqueName: \"kubernetes.io/projected/9dfe0bac-7a60-47c9-bef9-e34a75d23521-kube-api-access-xtht6\") pod \"keystone-operator-controller-manager-7b4567c7cf-gr8fl\" (UID: \"9dfe0bac-7a60-47c9-bef9-e34a75d23521\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.333480 4926 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.333634 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-vhkn7" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.338751 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.355527 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.371238 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtht6\" (UniqueName: \"kubernetes.io/projected/9dfe0bac-7a60-47c9-bef9-e34a75d23521-kube-api-access-xtht6\") pod \"keystone-operator-controller-manager-7b4567c7cf-gr8fl\" (UID: \"9dfe0bac-7a60-47c9-bef9-e34a75d23521\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.398109 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.402809 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.403389 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.404602 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.406380 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-dfq27" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.406768 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-8spnr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.419821 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.424938 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434161 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdz9b\" (UniqueName: \"kubernetes.io/projected/c613eed5-f72e-4b4d-8283-5aa4e6241157-kube-api-access-cdz9b\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-5nvnv\" (UID: \"c613eed5-f72e-4b4d-8283-5aa4e6241157\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434234 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xl76\" (UniqueName: \"kubernetes.io/projected/4e869634-c2f9-4248-8ad7-dd9af0315f2b-kube-api-access-2xl76\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434285 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsdl4\" (UniqueName: \"kubernetes.io/projected/5859a238-ed77-4ef7-ac69-295bd1c875c3-kube-api-access-bsdl4\") pod \"manila-operator-controller-manager-5d499bf58b-44shk\" (UID: \"5859a238-ed77-4ef7-ac69-295bd1c875c3\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljp24\" (UniqueName: \"kubernetes.io/projected/d992fc2a-a506-4c10-a8fa-1e3416074e73-kube-api-access-ljp24\") pod \"ovn-operator-controller-manager-56897c768d-9f2dg\" (UID: \"d992fc2a-a506-4c10-a8fa-1e3416074e73\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434337 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvvbx\" (UniqueName: \"kubernetes.io/projected/306a2bb2-20b9-436d-809a-55499e85e4d6-kube-api-access-cvvbx\") pod \"nova-operator-controller-manager-79556f57fc-rl7hc\" (UID: \"306a2bb2-20b9-436d-809a-55499e85e4d6\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434425 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgwzh\" (UniqueName: 
\"kubernetes.io/projected/74627669-e952-4db6-b082-5e7bd38b03b3-kube-api-access-xgwzh\") pod \"placement-operator-controller-manager-57988cc5b5-k8j22\" (UID: \"74627669-e952-4db6-b082-5e7bd38b03b3\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434444 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434489 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqzjx\" (UniqueName: \"kubernetes.io/projected/b4c6b194-9a8e-4cdb-a0e0-e67dce03328f-kube-api-access-lqzjx\") pod \"neutron-operator-controller-manager-6fdcddb789-rslqc\" (UID: \"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.434516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9ql5\" (UniqueName: \"kubernetes.io/projected/f9d1a5dc-de6e-45fa-ab5d-1de529f40894-kube-api-access-s9ql5\") pod \"octavia-operator-controller-manager-64cdc6ff96-s4wxr\" (UID: \"f9d1a5dc-de6e-45fa-ab5d-1de529f40894\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.451180 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.490953 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.510126 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.510273 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.518477 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsdl4\" (UniqueName: \"kubernetes.io/projected/5859a238-ed77-4ef7-ac69-295bd1c875c3-kube-api-access-bsdl4\") pod \"manila-operator-controller-manager-5d499bf58b-44shk\" (UID: \"5859a238-ed77-4ef7-ac69-295bd1c875c3\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.518782 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-v9t8w" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537423 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xl76\" (UniqueName: \"kubernetes.io/projected/4e869634-c2f9-4248-8ad7-dd9af0315f2b-kube-api-access-2xl76\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537482 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljp24\" (UniqueName: \"kubernetes.io/projected/d992fc2a-a506-4c10-a8fa-1e3416074e73-kube-api-access-ljp24\") pod \"ovn-operator-controller-manager-56897c768d-9f2dg\" (UID: \"d992fc2a-a506-4c10-a8fa-1e3416074e73\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537509 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvvbx\" (UniqueName: \"kubernetes.io/projected/306a2bb2-20b9-436d-809a-55499e85e4d6-kube-api-access-cvvbx\") pod \"nova-operator-controller-manager-79556f57fc-rl7hc\" (UID: \"306a2bb2-20b9-436d-809a-55499e85e4d6\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537558 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgwzh\" (UniqueName: \"kubernetes.io/projected/74627669-e952-4db6-b082-5e7bd38b03b3-kube-api-access-xgwzh\") pod \"placement-operator-controller-manager-57988cc5b5-k8j22\" (UID: \"74627669-e952-4db6-b082-5e7bd38b03b3\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537580 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v75dw\" (UniqueName: \"kubernetes.io/projected/230b098e-8a89-417e-b5aa-994695273779-kube-api-access-v75dw\") pod \"swift-operator-controller-manager-d77b94747-gcvkp\" (UID: \"230b098e-8a89-417e-b5aa-994695273779\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537573 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4"] Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.537698 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:01 crc 
kubenswrapper[4926]: E1125 18:28:01.537761 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert podName:4e869634-c2f9-4248-8ad7-dd9af0315f2b nodeName:}" failed. No retries permitted until 2025-11-25 18:28:02.037737537 +0000 UTC m=+912.423251142 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" (UID: "4e869634-c2f9-4248-8ad7-dd9af0315f2b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.537600 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.539194 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8kvq\" (UniqueName: \"kubernetes.io/projected/5885db97-a86c-482e-9851-2d8351dc0c3a-kube-api-access-z8kvq\") pod \"telemetry-operator-controller-manager-76cc84c6bb-mc5kd\" (UID: \"5885db97-a86c-482e-9851-2d8351dc0c3a\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.539258 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9ql5\" (UniqueName: \"kubernetes.io/projected/f9d1a5dc-de6e-45fa-ab5d-1de529f40894-kube-api-access-s9ql5\") pod \"octavia-operator-controller-manager-64cdc6ff96-s4wxr\" (UID: \"f9d1a5dc-de6e-45fa-ab5d-1de529f40894\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.539734 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.553333 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-vt7dx" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.556417 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdz9b\" (UniqueName: \"kubernetes.io/projected/c613eed5-f72e-4b4d-8283-5aa4e6241157-kube-api-access-cdz9b\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-5nvnv\" (UID: \"c613eed5-f72e-4b4d-8283-5aa4e6241157\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.575512 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.582030 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqzjx\" (UniqueName: \"kubernetes.io/projected/b4c6b194-9a8e-4cdb-a0e0-e67dce03328f-kube-api-access-lqzjx\") pod \"neutron-operator-controller-manager-6fdcddb789-rslqc\" (UID: \"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.591162 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9ql5\" (UniqueName: \"kubernetes.io/projected/f9d1a5dc-de6e-45fa-ab5d-1de529f40894-kube-api-access-s9ql5\") pod \"octavia-operator-controller-manager-64cdc6ff96-s4wxr\" (UID: \"f9d1a5dc-de6e-45fa-ab5d-1de529f40894\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.591454 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvvbx\" (UniqueName: \"kubernetes.io/projected/306a2bb2-20b9-436d-809a-55499e85e4d6-kube-api-access-cvvbx\") pod \"nova-operator-controller-manager-79556f57fc-rl7hc\" (UID: \"306a2bb2-20b9-436d-809a-55499e85e4d6\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.596832 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljp24\" (UniqueName: \"kubernetes.io/projected/d992fc2a-a506-4c10-a8fa-1e3416074e73-kube-api-access-ljp24\") pod \"ovn-operator-controller-manager-56897c768d-9f2dg\" (UID: \"d992fc2a-a506-4c10-a8fa-1e3416074e73\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.607096 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xl76\" (UniqueName: \"kubernetes.io/projected/4e869634-c2f9-4248-8ad7-dd9af0315f2b-kube-api-access-2xl76\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.607292 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgwzh\" (UniqueName: \"kubernetes.io/projected/74627669-e952-4db6-b082-5e7bd38b03b3-kube-api-access-xgwzh\") pod 
\"placement-operator-controller-manager-57988cc5b5-k8j22\" (UID: \"74627669-e952-4db6-b082-5e7bd38b03b3\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.625467 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.626695 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.640225 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fd8k7" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.640520 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.640737 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk575\" (UniqueName: \"kubernetes.io/projected/7ac21b6b-e21a-43db-acf1-cce61bf188ef-kube-api-access-pk575\") pod \"test-operator-controller-manager-5cd6c7f4c8-5nnqb\" (UID: \"7ac21b6b-e21a-43db-acf1-cce61bf188ef\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.640809 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbl87\" (UniqueName: \"kubernetes.io/projected/596d3616-ddec-489c-be4d-7e340f9e2acb-kube-api-access-qbl87\") pod \"watcher-operator-controller-manager-647d45fc97-x65c4\" (UID: \"596d3616-ddec-489c-be4d-7e340f9e2acb\") " pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.640838 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v75dw\" (UniqueName: \"kubernetes.io/projected/230b098e-8a89-417e-b5aa-994695273779-kube-api-access-v75dw\") pod \"swift-operator-controller-manager-d77b94747-gcvkp\" (UID: \"230b098e-8a89-417e-b5aa-994695273779\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.640875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8kvq\" (UniqueName: \"kubernetes.io/projected/5885db97-a86c-482e-9851-2d8351dc0c3a-kube-api-access-z8kvq\") pod \"telemetry-operator-controller-manager-76cc84c6bb-mc5kd\" (UID: \"5885db97-a86c-482e-9851-2d8351dc0c3a\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.641180 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.654851 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.673447 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.677542 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.678781 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.688399 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-xg9bb" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.688644 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v75dw\" (UniqueName: \"kubernetes.io/projected/230b098e-8a89-417e-b5aa-994695273779-kube-api-access-v75dw\") pod \"swift-operator-controller-manager-d77b94747-gcvkp\" (UID: \"230b098e-8a89-417e-b5aa-994695273779\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.695798 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn"] Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.706041 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8kvq\" (UniqueName: \"kubernetes.io/projected/5885db97-a86c-482e-9851-2d8351dc0c3a-kube-api-access-z8kvq\") pod \"telemetry-operator-controller-manager-76cc84c6bb-mc5kd\" (UID: \"5885db97-a86c-482e-9851-2d8351dc0c3a\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.711737 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.722591 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.744289 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6jgt\" (UniqueName: \"kubernetes.io/projected/e15f3b97-0859-4f12-87cd-514fab3d75aa-kube-api-access-h6jgt\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.744349 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk575\" (UniqueName: \"kubernetes.io/projected/7ac21b6b-e21a-43db-acf1-cce61bf188ef-kube-api-access-pk575\") pod \"test-operator-controller-manager-5cd6c7f4c8-5nnqb\" (UID: \"7ac21b6b-e21a-43db-acf1-cce61bf188ef\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.744418 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.744441 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbl87\" (UniqueName: \"kubernetes.io/projected/596d3616-ddec-489c-be4d-7e340f9e2acb-kube-api-access-qbl87\") pod \"watcher-operator-controller-manager-647d45fc97-x65c4\" (UID: \"596d3616-ddec-489c-be4d-7e340f9e2acb\") " pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.744485 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.744540 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.744709 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.744771 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert podName:c67a3051-deee-4c35-b2fd-73f0f96ccbac nodeName:}" failed. No retries permitted until 2025-11-25 18:28:02.744747229 +0000 UTC m=+913.130260834 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert") pod "infra-operator-controller-manager-57548d458d-l4vqr" (UID: "c67a3051-deee-4c35-b2fd-73f0f96ccbac") : secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.780846 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.782471 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.790384 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbl87\" (UniqueName: \"kubernetes.io/projected/596d3616-ddec-489c-be4d-7e340f9e2acb-kube-api-access-qbl87\") pod \"watcher-operator-controller-manager-647d45fc97-x65c4\" (UID: \"596d3616-ddec-489c-be4d-7e340f9e2acb\") " pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.796442 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.809303 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk575\" (UniqueName: \"kubernetes.io/projected/7ac21b6b-e21a-43db-acf1-cce61bf188ef-kube-api-access-pk575\") pod \"test-operator-controller-manager-5cd6c7f4c8-5nnqb\" (UID: \"7ac21b6b-e21a-43db-acf1-cce61bf188ef\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.825914 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.846371 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.846451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.846497 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6jgt\" (UniqueName: \"kubernetes.io/projected/e15f3b97-0859-4f12-87cd-514fab3d75aa-kube-api-access-h6jgt\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.846534 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzv45\" (UniqueName: \"kubernetes.io/projected/abc17280-a647-4d60-8a1a-d01505970238-kube-api-access-pzv45\") pod \"rabbitmq-cluster-operator-manager-668c99d594-v9gvn\" (UID: \"abc17280-a647-4d60-8a1a-d01505970238\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.847686 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.849050 4926 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.849126 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:02.349100957 +0000 UTC m=+912.734614562 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "metrics-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.849975 4926 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: E1125 18:28:01.850088 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. 
No retries permitted until 2025-11-25 18:28:02.350055654 +0000 UTC m=+912.735569439 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "webhook-server-cert" not found Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.851610 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.853273 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.876838 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.897735 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6jgt\" (UniqueName: \"kubernetes.io/projected/e15f3b97-0859-4f12-87cd-514fab3d75aa-kube-api-access-h6jgt\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.949667 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzv45\" (UniqueName: \"kubernetes.io/projected/abc17280-a647-4d60-8a1a-d01505970238-kube-api-access-pzv45\") pod \"rabbitmq-cluster-operator-manager-668c99d594-v9gvn\" (UID: \"abc17280-a647-4d60-8a1a-d01505970238\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" Nov 25 18:28:01 crc kubenswrapper[4926]: I1125 18:28:01.973822 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzv45\" (UniqueName: \"kubernetes.io/projected/abc17280-a647-4d60-8a1a-d01505970238-kube-api-access-pzv45\") pod \"rabbitmq-cluster-operator-manager-668c99d594-v9gvn\" (UID: \"abc17280-a647-4d60-8a1a-d01505970238\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.053221 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.053455 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.053582 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert podName:4e869634-c2f9-4248-8ad7-dd9af0315f2b nodeName:}" failed. No retries permitted until 2025-11-25 18:28:03.053545347 +0000 UTC m=+913.439059002 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" (UID: "4e869634-c2f9-4248-8ad7-dd9af0315f2b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.229449 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.359279 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.359358 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.359567 4926 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.359673 4926 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.359687 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:03.359662792 +0000 UTC m=+913.745176397 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "webhook-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.359944 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:03.359869887 +0000 UTC m=+913.745383542 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "metrics-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.389805 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b"] Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.389852 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-bpsp8"] Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.389882 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp"] Nov 25 18:28:02 crc kubenswrapper[4926]: W1125 18:28:02.415726 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8395389_762a_497d_972e_0987350a9a00.slice/crio-757867239e3916c115c17aa3ef406c66bb1ee708db75cf3959e4237504af5b3c WatchSource:0}: Error finding container 757867239e3916c115c17aa3ef406c66bb1ee708db75cf3959e4237504af5b3c: Status 404 returned error can't find the container with id 757867239e3916c115c17aa3ef406c66bb1ee708db75cf3959e4237504af5b3c Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.416887 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:28:02 crc kubenswrapper[4926]: W1125 18:28:02.421653 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod421c1930_795c_4e93_9865_bff40d49ddf5.slice/crio-6610f9379783b10ba560e892f9a0426eb50a5244b4879728c73c5d98db3739ca WatchSource:0}: Error finding container 6610f9379783b10ba560e892f9a0426eb50a5244b4879728c73c5d98db3739ca: Status 404 returned error can't find the container with id 6610f9379783b10ba560e892f9a0426eb50a5244b4879728c73c5d98db3739ca Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.708225 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t"] Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.721857 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl"] Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.730455 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx"] Nov 25 18:28:02 crc kubenswrapper[4926]: I1125 18:28:02.766176 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.766450 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:02 crc kubenswrapper[4926]: E1125 18:28:02.766529 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert 
podName:c67a3051-deee-4c35-b2fd-73f0f96ccbac nodeName:}" failed. No retries permitted until 2025-11-25 18:28:04.766505276 +0000 UTC m=+915.152018881 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert") pod "infra-operator-controller-manager-57548d458d-l4vqr" (UID: "c67a3051-deee-4c35-b2fd-73f0f96ccbac") : secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.044343 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.072123 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.072854 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.073108 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.073187 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert podName:4e869634-c2f9-4248-8ad7-dd9af0315f2b nodeName:}" failed. No retries permitted until 2025-11-25 18:28:05.073162265 +0000 UTC m=+915.458675870 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" (UID: "4e869634-c2f9-4248-8ad7-dd9af0315f2b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.078457 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.085796 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.097563 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.110528 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.120909 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.124553 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.130957 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.135649 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr"] Nov 25 18:28:03 crc kubenswrapper[4926]: W1125 18:28:03.144693 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5859a238_ed77_4ef7_ac69_295bd1c875c3.slice/crio-e3f6d2ee089a62d72280d755619888d028d1e8a0e0b76420a1c999769e4d35fd WatchSource:0}: Error finding container e3f6d2ee089a62d72280d755619888d028d1e8a0e0b76420a1c999769e4d35fd: Status 404 returned error can't find the container with id e3f6d2ee089a62d72280d755619888d028d1e8a0e0b76420a1c999769e4d35fd Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.144890 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb"] Nov 25 18:28:03 crc kubenswrapper[4926]: W1125 18:28:03.148719 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9d1a5dc_de6e_45fa_ab5d_1de529f40894.slice/crio-7e808a0045939bb65322161f1334f5c08671b901b4b5aa9fa88b394e58e8f1a4 WatchSource:0}: Error finding container 7e808a0045939bb65322161f1334f5c08671b901b4b5aa9fa88b394e58e8f1a4: Status 404 returned error can't find the container with id 7e808a0045939bb65322161f1334f5c08671b901b4b5aa9fa88b394e58e8f1a4 Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.148788 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.155508 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc"] Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.157811 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn"] Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.163595 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xgwzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-k8j22_openstack-operators(74627669-e952-4db6-b082-5e7bd38b03b3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: W1125 18:28:03.169568 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc613eed5_f72e_4b4d_8283_5aa4e6241157.slice/crio-b24b861314f35ed6f18ec13efa9dc4f4392beddbdd0a961b925a13bc5f255b27 WatchSource:0}: Error finding container b24b861314f35ed6f18ec13efa9dc4f4392beddbdd0a961b925a13bc5f255b27: Status 404 returned error can't find the container with id b24b861314f35ed6f18ec13efa9dc4f4392beddbdd0a961b925a13bc5f255b27 Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.173259 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xgwzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-k8j22_openstack-operators(74627669-e952-4db6-b082-5e7bd38b03b3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.174601 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.174830 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pk575,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-5nnqb_openstack-operators(7ac21b6b-e21a-43db-acf1-cce61bf188ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.182454 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xtht6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-gr8fl_openstack-operators(9dfe0bac-7a60-47c9-bef9-e34a75d23521): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.182532 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pk575,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-5nnqb_openstack-operators(7ac21b6b-e21a-43db-acf1-cce61bf188ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.183789 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.188821 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xtht6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-gr8fl_openstack-operators(9dfe0bac-7a60-47c9-bef9-e34a75d23521): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.188995 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cdz9b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_openstack-operators(c613eed5-f72e-4b4d-8283-5aa4e6241157): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.190170 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podUID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" Nov 25 18:28:03 crc kubenswrapper[4926]: W1125 18:28:03.191876 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabc17280_a647_4d60_8a1a_d01505970238.slice/crio-4b2d7f2aa6bc6c0d0ce5d2c75bdbca6253827908b4fda001736c4a2ba8f90650 WatchSource:0}: Error finding container 4b2d7f2aa6bc6c0d0ce5d2c75bdbca6253827908b4fda001736c4a2ba8f90650: Status 404 returned error can't find the container with id 4b2d7f2aa6bc6c0d0ce5d2c75bdbca6253827908b4fda001736c4a2ba8f90650 Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.193167 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cdz9b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_openstack-operators(c613eed5-f72e-4b4d-8283-5aa4e6241157): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.194165 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.27:5001/openstack-k8s-operators/watcher-operator:b011619a365e60582fc0532b8a73be6f1329af85,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qbl87,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-647d45fc97-x65c4_openstack-operators(596d3616-ddec-489c-be4d-7e340f9e2acb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.194293 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.197754 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qbl87,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-647d45fc97-x65c4_openstack-operators(596d3616-ddec-489c-be4d-7e340f9e2acb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.197771 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cvvbx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-rl7hc_openstack-operators(306a2bb2-20b9-436d-809a-55499e85e4d6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.199943 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.215811 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cvvbx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-rl7hc_openstack-operators(306a2bb2-20b9-436d-809a-55499e85e4d6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.217181 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" 
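The burst of "ErrImagePull: pull QPS exceeded" failures above is the kubelet's own client-side rate limiter rejecting image pulls, not a registry-side error: the operator pods were all scheduled within the same second, their pulls exceeded the per-node QPS cap, and each affected container then sits in ImagePullBackOff until a later retry gets through. The cap is tunable in the kubelet configuration; the sketch below uses field names from KubeletConfiguration v1beta1, with the upstream default values shown (what this node actually runs is not visible in the log):

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # serializeImagePulls: pull images one at a time (upstream default: true).
    serializeImagePulls: true
    # registryPullQPS: maximum image pulls per second; 0 disables the limit
    # (upstream default: 5). Exceeding it produces "pull QPS exceeded".
    registryPullQPS: 5
    # registryBurst: short burst allowance above registryPullQPS; only
    # consulted while registryPullQPS > 0 (upstream default: 10).
    registryBurst: 10

The MountVolume.SetUp failures interleaved with the pull errors are a separate, self-healing ordering issue: the webhook and metrics certificate secrets ("webhook-server-cert", "metrics-server-cert", "infra-operator-webhook-server-cert", and so on) had not yet been created in the openstack-operators namespace, presumably because the component that issues them was itself still starting. The kubelet retries each mount with a doubling backoff, visible below as durationBeforeRetry 2s, then 4s, then 8s. Each mount succeeds as soon as a matching Secret exists; a minimal sketch of the shape being waited on, with the name and namespace taken from the log and the payload left as labeled placeholders (an assumption for illustration only — in this deployment such secrets are normally issued by the certificate tooling, not created by hand):

    apiVersion: v1
    kind: Secret
    metadata:
      name: webhook-server-cert        # name from the mount error in the log
      namespace: openstack-operators
    type: kubernetes.io/tls
    data:
      tls.crt: <base64-encoded certificate>  # placeholder
      tls.key: <base64-encoded private key>  # placeholder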
Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.380691 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.380803 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.381030 4926 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.381103 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:05.381078529 +0000 UTC m=+915.766592134 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "webhook-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.381563 4926 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.381596 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:05.381586273 +0000 UTC m=+915.767099878 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "metrics-server-cert" not found Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.397963 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerStarted","Data":"6eadcc6da9c0c081134b487bce0bad0719b3f7eba57c9b6f0a1eec0fa726670e"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.400616 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerStarted","Data":"6610f9379783b10ba560e892f9a0426eb50a5244b4879728c73c5d98db3739ca"} Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.400864 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podUID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.406373 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerStarted","Data":"175b4f82f3d11934323471ab15bf711f4138025991d2ef5643d50fe4d60dd579"} Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.409282 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.410198 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerStarted","Data":"93f3fea79156bb5655f84debbc5f31a19c29b726a88c702d86214a0358693a6d"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.411475 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerStarted","Data":"e3f6d2ee089a62d72280d755619888d028d1e8a0e0b76420a1c999769e4d35fd"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.413640 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" 
event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerStarted","Data":"fe4d7610a68f6079fc4fa1916e147dbc02c4253d877f05c24a904b861bf16aad"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.417158 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerStarted","Data":"faa799d3bc588b0a6e7a658df423ed88f3524c720eb9c3aedfc414a3ee06ec38"} Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.420927 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.435046 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerStarted","Data":"7e808a0045939bb65322161f1334f5c08671b901b4b5aa9fa88b394e58e8f1a4"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.444888 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerStarted","Data":"c14eca217788a6448d87f0387f1be18cbec237b2b4c36c556c2860fe81205984"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.446652 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerStarted","Data":"2c437a76b92e0f74f47f62bea394407bb6886611122d53eb42c818e60b9bac53"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.457934 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerStarted","Data":"e1d52bb2e0fe38b5b1b109d7497b749f5d4e338672d21167685e78d82da22091"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.459947 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" event={"ID":"abc17280-a647-4d60-8a1a-d01505970238","Type":"ContainerStarted","Data":"4b2d7f2aa6bc6c0d0ce5d2c75bdbca6253827908b4fda001736c4a2ba8f90650"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.462128 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerStarted","Data":"14d780a45d380dc7541286d46ac9b61d9abbc274b4cd66327d92d7177b8d3236"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.464021 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" 
event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerStarted","Data":"c79e869156e9dca2ae3e266faa4ba24f9b222db115622a769559802e6e006438"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.467626 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerStarted","Data":"b24b861314f35ed6f18ec13efa9dc4f4392beddbdd0a961b925a13bc5f255b27"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.470407 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerStarted","Data":"757867239e3916c115c17aa3ef406c66bb1ee708db75cf3959e4237504af5b3c"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.473846 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerStarted","Data":"923e06e1520de58290aa0bdfa9fdaebf3b4300362af268d21f74f389422f2467"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.476362 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerStarted","Data":"35e2871030c1ef624f3ca98b248db728a5182e61fb816639bba7f9bef4904eac"} Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.478980 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerStarted","Data":"137319591fe1f8eb62f966fdff76539fe6edff56eebcc3e15ca90d67d4790573"} Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.478994 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/openstack-k8s-operators/watcher-operator:b011619a365e60582fc0532b8a73be6f1329af85\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" Nov 25 18:28:03 crc kubenswrapper[4926]: E1125 18:28:03.482352 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.484300 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerStarted","Data":"f1ec0a18acda6e4992f33735fcff2fbd2862441896204bc36fc7187f511ec3d2"} Nov 25 18:28:03 crc kubenswrapper[4926]: 
E1125 18:28:03.486770 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.549673 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:28:03 crc kubenswrapper[4926]: I1125 18:28:03.549750 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.517674 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.517801 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podUID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.517853 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.517900 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.517950 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.518001 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/openstack-k8s-operators/watcher-operator:b011619a365e60582fc0532b8a73be6f1329af85\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" Nov 25 18:28:04 crc kubenswrapper[4926]: I1125 18:28:04.836957 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.837179 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:04 crc kubenswrapper[4926]: E1125 18:28:04.837269 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert podName:c67a3051-deee-4c35-b2fd-73f0f96ccbac nodeName:}" failed. No retries permitted until 2025-11-25 18:28:08.837247155 +0000 UTC m=+919.222760900 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert") pod "infra-operator-controller-manager-57548d458d-l4vqr" (UID: "c67a3051-deee-4c35-b2fd-73f0f96ccbac") : secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:05 crc kubenswrapper[4926]: I1125 18:28:05.143445 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:05 crc kubenswrapper[4926]: E1125 18:28:05.143704 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:05 crc kubenswrapper[4926]: E1125 18:28:05.143770 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert podName:4e869634-c2f9-4248-8ad7-dd9af0315f2b nodeName:}" failed. No retries permitted until 2025-11-25 18:28:09.14374779 +0000 UTC m=+919.529261395 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" (UID: "4e869634-c2f9-4248-8ad7-dd9af0315f2b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:05 crc kubenswrapper[4926]: I1125 18:28:05.466166 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:05 crc kubenswrapper[4926]: I1125 18:28:05.466236 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:05 crc kubenswrapper[4926]: E1125 18:28:05.466412 4926 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 18:28:05 crc kubenswrapper[4926]: E1125 18:28:05.466438 4926 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 18:28:05 crc kubenswrapper[4926]: E1125 18:28:05.466487 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:09.466461066 +0000 UTC m=+919.851974671 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "webhook-server-cert" not found Nov 25 18:28:05 crc kubenswrapper[4926]: E1125 18:28:05.466538 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:09.466508467 +0000 UTC m=+919.852022072 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "metrics-server-cert" not found Nov 25 18:28:08 crc kubenswrapper[4926]: I1125 18:28:08.927450 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:08 crc kubenswrapper[4926]: E1125 18:28:08.927713 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:08 crc kubenswrapper[4926]: E1125 18:28:08.928160 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert podName:c67a3051-deee-4c35-b2fd-73f0f96ccbac nodeName:}" failed. No retries permitted until 2025-11-25 18:28:16.928130289 +0000 UTC m=+927.313643894 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert") pod "infra-operator-controller-manager-57548d458d-l4vqr" (UID: "c67a3051-deee-4c35-b2fd-73f0f96ccbac") : secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:09 crc kubenswrapper[4926]: I1125 18:28:09.231810 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:09 crc kubenswrapper[4926]: E1125 18:28:09.232092 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:09 crc kubenswrapper[4926]: E1125 18:28:09.234732 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert podName:4e869634-c2f9-4248-8ad7-dd9af0315f2b nodeName:}" failed. No retries permitted until 2025-11-25 18:28:17.234673435 +0000 UTC m=+927.620187040 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" (UID: "4e869634-c2f9-4248-8ad7-dd9af0315f2b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 18:28:09 crc kubenswrapper[4926]: I1125 18:28:09.537352 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:09 crc kubenswrapper[4926]: I1125 18:28:09.537519 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:09 crc kubenswrapper[4926]: E1125 18:28:09.537533 4926 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 18:28:09 crc kubenswrapper[4926]: E1125 18:28:09.537612 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:17.537586591 +0000 UTC m=+927.923100196 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "metrics-server-cert" not found Nov 25 18:28:09 crc kubenswrapper[4926]: E1125 18:28:09.537772 4926 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 18:28:09 crc kubenswrapper[4926]: E1125 18:28:09.537896 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs podName:e15f3b97-0859-4f12-87cd-514fab3d75aa nodeName:}" failed. No retries permitted until 2025-11-25 18:28:17.537862369 +0000 UTC m=+927.923376034 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs") pod "openstack-operator-controller-manager-79c67b7c89-tcqww" (UID: "e15f3b97-0859-4f12-87cd-514fab3d75aa") : secret "webhook-server-cert" not found Nov 25 18:28:15 crc kubenswrapper[4926]: E1125 18:28:15.062912 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:45ae665ce2ea81aef212ee402cb02693ee49001a7c88c40c9598ff2859b838a2" Nov 25 18:28:15 crc kubenswrapper[4926]: E1125 18:28:15.064119 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:45ae665ce2ea81aef212ee402cb02693ee49001a7c88c40c9598ff2859b838a2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g5b9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-589cbd6b5b-8w6rx_openstack-operators(c8322c05-5b96-4489-87a7-1677f90df80c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:28:15 crc kubenswrapper[4926]: E1125 18:28:15.789439 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c" Nov 25 18:28:15 crc 
kubenswrapper[4926]: E1125 18:28:15.789691 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zgg7m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b64f4fb85-hhl9b_openstack-operators(c8395389-762a-497d-972e-0987350a9a00): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:28:16 crc kubenswrapper[4926]: E1125 18:28:16.313225 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6" Nov 25 18:28:16 crc kubenswrapper[4926]: E1125 18:28:16.313454 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ljp24,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-9f2dg_openstack-operators(d992fc2a-a506-4c10-a8fa-1e3416074e73): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:28:16 crc kubenswrapper[4926]: E1125 18:28:16.915920 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:ec4e5c911c1d0f1ea211a04b251a9d2e95b69d141c1caf07a0381693b2d6368b" Nov 25 18:28:16 crc kubenswrapper[4926]: E1125 18:28:16.916770 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:ec4e5c911c1d0f1ea211a04b251a9d2e95b69d141c1caf07a0381693b2d6368b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cpgjq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-955677c94-bpsp8_openstack-operators(e949ca02-dbd2-4361-8b44-a498d1ec4c13): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:28:16 crc kubenswrapper[4926]: I1125 18:28:16.956011 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:16 crc kubenswrapper[4926]: E1125 18:28:16.956222 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:16 crc kubenswrapper[4926]: E1125 18:28:16.956300 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert podName:c67a3051-deee-4c35-b2fd-73f0f96ccbac nodeName:}" failed. No retries permitted until 2025-11-25 18:28:32.956265418 +0000 UTC m=+943.341779023 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert") pod "infra-operator-controller-manager-57548d458d-l4vqr" (UID: "c67a3051-deee-4c35-b2fd-73f0f96ccbac") : secret "infra-operator-webhook-server-cert" not found Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.260152 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.281633 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4e869634-c2f9-4248-8ad7-dd9af0315f2b-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74\" (UID: \"4e869634-c2f9-4248-8ad7-dd9af0315f2b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.569198 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.569366 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.573351 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-webhook-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.578060 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e15f3b97-0859-4f12-87cd-514fab3d75aa-metrics-certs\") pod \"openstack-operator-controller-manager-79c67b7c89-tcqww\" (UID: \"e15f3b97-0859-4f12-87cd-514fab3d75aa\") " pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.578946 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.653757 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" event={"ID":"abc17280-a647-4d60-8a1a-d01505970238","Type":"ContainerStarted","Data":"9b3870ae68423320d87cdd5c3a13a8bfb425ee7a43670235d9404ce8009e75cf"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.668801 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerStarted","Data":"4a11c7a32b52da25f55657b390f56bfc989ba7e9902399ab5cd9c90dafd9911f"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.671577 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" podStartSLOduration=2.943128834 podStartE2EDuration="16.67156271s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.223622915 +0000 UTC m=+913.609136520" lastFinishedPulling="2025-11-25 18:28:16.952056791 +0000 UTC m=+927.337570396" observedRunningTime="2025-11-25 18:28:17.671401666 +0000 UTC m=+928.056915271" watchObservedRunningTime="2025-11-25 18:28:17.67156271 +0000 UTC m=+928.057076315" Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.685942 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerStarted","Data":"57ba6053b6d768094587b78baa67e67f682c7f8d16bf738b7b7152a97a7a56b2"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.686995 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerStarted","Data":"fc4d40c0f9343c6adbc15c21e42091c214d9cc4e696c2eb9b10c24d263495568"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.687837 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerStarted","Data":"88deed01138ef7fdf9e97905c467d5fc72ef2b2d6252b40b5f62375fee4ef227"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.688657 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerStarted","Data":"78b7d6650635333098268bbb9a192fcf36d9915ac328aeb59802be4c4076ea54"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.690321 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerStarted","Data":"7942fd3051661ba979dd20a373ed0ebb98018b89a78e619a9c6c2c11c6e7b104"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.737106 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerStarted","Data":"a703b3ca21c7188fc90a832e0b74a11850db4ca3065ab85f9996a985bc86c604"} Nov 
25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.743139 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerStarted","Data":"c7c18cc5dacf53993e8b01e0ffa88f9592749f627e6c3a7e0d872ab42c51668b"} Nov 25 18:28:17 crc kubenswrapper[4926]: I1125 18:28:17.827018 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:19 crc kubenswrapper[4926]: I1125 18:28:19.482045 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww"] Nov 25 18:28:19 crc kubenswrapper[4926]: W1125 18:28:19.621388 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode15f3b97_0859_4f12_87cd_514fab3d75aa.slice/crio-03b282a4b38ac439d62d97626a7a44b0749ff1db75950ad812c00308c219bd61 WatchSource:0}: Error finding container 03b282a4b38ac439d62d97626a7a44b0749ff1db75950ad812c00308c219bd61: Status 404 returned error can't find the container with id 03b282a4b38ac439d62d97626a7a44b0749ff1db75950ad812c00308c219bd61 Nov 25 18:28:19 crc kubenswrapper[4926]: I1125 18:28:19.692107 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74"] Nov 25 18:28:19 crc kubenswrapper[4926]: I1125 18:28:19.777331 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerStarted","Data":"688df1ca7e2c9e28a17c9c8873994c87fd5799073043ed01c30bef3ed85c7407"} Nov 25 18:28:19 crc kubenswrapper[4926]: I1125 18:28:19.783387 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" event={"ID":"e15f3b97-0859-4f12-87cd-514fab3d75aa","Type":"ContainerStarted","Data":"03b282a4b38ac439d62d97626a7a44b0749ff1db75950ad812c00308c219bd61"} Nov 25 18:28:20 crc kubenswrapper[4926]: I1125 18:28:20.791666 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerStarted","Data":"5ecee30073fe782a8d134dd64ee068151cf8229e777a396fc46ba3b606df1778"} Nov 25 18:28:27 crc kubenswrapper[4926]: E1125 18:28:27.734107 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" Nov 25 18:28:27 crc kubenswrapper[4926]: E1125 18:28:27.762704 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" Nov 25 18:28:27 crc kubenswrapper[4926]: I1125 18:28:27.846924 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" 
event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerStarted","Data":"971341d0fbe10de9cf70b5d35c40e6b9d1ce52622b95ab03bc07511c13c4fd88"} Nov 25 18:28:27 crc kubenswrapper[4926]: I1125 18:28:27.851144 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" event={"ID":"e15f3b97-0859-4f12-87cd-514fab3d75aa","Type":"ContainerStarted","Data":"62ee1f8d5cb505d935396baebef3bd748835ef377da13a0a40c81288f3c7f9ad"} Nov 25 18:28:27 crc kubenswrapper[4926]: I1125 18:28:27.851775 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:27 crc kubenswrapper[4926]: I1125 18:28:27.853256 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerStarted","Data":"7d174004b7d7db04543b008f2e8d2f2a69722d9d40160d47513b80fc37c9aef4"} Nov 25 18:28:27 crc kubenswrapper[4926]: I1125 18:28:27.856084 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerStarted","Data":"7d32ed9d7c97edc95b6d39ad1cc88a376fd3dcbea77a0e9db77263fe120856f7"} Nov 25 18:28:27 crc kubenswrapper[4926]: I1125 18:28:27.857692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerStarted","Data":"afb872d1072f1092b17ff41b103e9985cf27a560cb10b1a33bc68b0903230f95"} Nov 25 18:28:28 crc kubenswrapper[4926]: E1125 18:28:28.016461 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" Nov 25 18:28:28 crc kubenswrapper[4926]: E1125 18:28:28.190218 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.871845 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerStarted","Data":"fe7de231f90153efe556ecbd82e9a9aac08ff78998823dc181be1c432f0e3255"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.882466 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerStarted","Data":"7414c42ba3a576d3c80de7087b4770de468ca801765bc5304a30162cc3a3bde6"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.882558 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.885417 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.886922 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerStarted","Data":"990e554fc046c7f61c9d159f735f5f52deb1a3516b249024a6c1010cf1d2691a"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.894851 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerStarted","Data":"7a2ed56261b651d13db086dd8a2d068554707b64389754c5755216cc0d8f0ecb"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.895467 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.901609 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.901748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerStarted","Data":"36d5a4029a03cefd233b081cf058512bd816348c086fb84f67bd9bc68e0d3f57"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.906562 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podStartSLOduration=27.906548864 podStartE2EDuration="27.906548864s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:28:27.93640097 +0000 UTC m=+938.321914575" watchObservedRunningTime="2025-11-25 18:28:28.906548864 +0000 UTC m=+939.292062469" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.918833 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerStarted","Data":"d33a3388e5be48ae779ab3689c51486c6d865e7005fd45b826b74f00d8c84e5e"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.934301 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerStarted","Data":"186002db764b089c0e3729b88604b29cb78e9f1515d02f2d27a7ac362b272f03"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.951870 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podStartSLOduration=4.063491469 podStartE2EDuration="28.951846162s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:02.426454528 +0000 UTC m=+912.811968123" lastFinishedPulling="2025-11-25 18:28:27.314809191 +0000 UTC m=+937.700322816" observedRunningTime="2025-11-25 18:28:28.911844301 +0000 UTC m=+939.297357926" watchObservedRunningTime="2025-11-25 18:28:28.951846162 +0000 UTC m=+939.337359767" Nov 
25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.952731 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerStarted","Data":"d456a2a03ce7bbe75c201bdc01e7c8c1e982045701592916eeda6a8cf7490442"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.954160 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.961977 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerStarted","Data":"e40727dcc83eb165da62582b8c65f6f4040f011590a65c65c443f456aacd75a4"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.963717 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.968146 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.979659 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerStarted","Data":"a987f1fa06e5f3a1b727aeaed667578086c225f46daec17915d644f61d169beb"} Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.980578 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:28 crc kubenswrapper[4926]: I1125 18:28:28.981325 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.002240 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.003578 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerStarted","Data":"30cb023ac6bc185e0bdee42576a8e8ae67ff00d6c22cd65550bfcb64bfaf1066"} Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.059378 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podStartSLOduration=4.610485665 podStartE2EDuration="29.059352129s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:02.723180641 +0000 UTC m=+913.108694236" lastFinishedPulling="2025-11-25 18:28:27.172047095 +0000 UTC m=+937.557560700" observedRunningTime="2025-11-25 18:28:29.044851056 +0000 UTC m=+939.430364671" watchObservedRunningTime="2025-11-25 18:28:29.059352129 +0000 UTC m=+939.444865734" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.140416 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podStartSLOduration=4.982577642 
podStartE2EDuration="29.140371129s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.160977424 +0000 UTC m=+913.546491029" lastFinishedPulling="2025-11-25 18:28:27.318770901 +0000 UTC m=+937.704284516" observedRunningTime="2025-11-25 18:28:29.138202669 +0000 UTC m=+939.523716274" watchObservedRunningTime="2025-11-25 18:28:29.140371129 +0000 UTC m=+939.525884734" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.171680 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podStartSLOduration=3.972913455 podStartE2EDuration="28.171655559s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.101895563 +0000 UTC m=+913.487409158" lastFinishedPulling="2025-11-25 18:28:27.300637657 +0000 UTC m=+937.686151262" observedRunningTime="2025-11-25 18:28:29.164827469 +0000 UTC m=+939.550341074" watchObservedRunningTime="2025-11-25 18:28:29.171655559 +0000 UTC m=+939.557169164" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.383248 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerStarted","Data":"88b6255a1e48b16e2374584d3809a5f1d4a6ff341f490ebeda7827f25b57a903"} Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.384059 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.389128 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.407200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerStarted","Data":"7c3f5f6908f29670cc972cd774e05d8651974681ae5f49d1de3f953399eff98c"} Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.433892 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" podStartSLOduration=4.84535057 podStartE2EDuration="29.433869104s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:02.721577317 +0000 UTC m=+913.107090922" lastFinishedPulling="2025-11-25 18:28:27.310095831 +0000 UTC m=+937.695609456" observedRunningTime="2025-11-25 18:28:29.195953084 +0000 UTC m=+939.581466689" watchObservedRunningTime="2025-11-25 18:28:29.433869104 +0000 UTC m=+939.819382709" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.434462 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podStartSLOduration=4.167068189 podStartE2EDuration="28.43445554s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.105560495 +0000 UTC m=+913.491074100" lastFinishedPulling="2025-11-25 18:28:27.372947836 +0000 UTC m=+937.758461451" observedRunningTime="2025-11-25 18:28:29.428819643 +0000 UTC m=+939.814333238" watchObservedRunningTime="2025-11-25 18:28:29.43445554 +0000 UTC m=+939.819969145" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 
18:28:29.436047 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerStarted","Data":"f8bfa0f6fb88d30ad2bf29160555128359d4ea5e9aef9d7bee5119232f0c28a7"} Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.436177 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.459104 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 18:28:29 crc kubenswrapper[4926]: I1125 18:28:29.554160 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podStartSLOduration=5.270208704 podStartE2EDuration="29.554129145s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.104447964 +0000 UTC m=+913.489961569" lastFinishedPulling="2025-11-25 18:28:27.388368405 +0000 UTC m=+937.773882010" observedRunningTime="2025-11-25 18:28:29.510181024 +0000 UTC m=+939.895694629" watchObservedRunningTime="2025-11-25 18:28:29.554129145 +0000 UTC m=+939.939642750" Nov 25 18:28:32 crc kubenswrapper[4926]: I1125 18:28:32.994488 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:33 crc kubenswrapper[4926]: I1125 18:28:33.012282 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c67a3051-deee-4c35-b2fd-73f0f96ccbac-cert\") pod \"infra-operator-controller-manager-57548d458d-l4vqr\" (UID: \"c67a3051-deee-4c35-b2fd-73f0f96ccbac\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:33 crc kubenswrapper[4926]: I1125 18:28:33.041574 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:33 crc kubenswrapper[4926]: I1125 18:28:33.542436 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:28:33 crc kubenswrapper[4926]: I1125 18:28:33.543815 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:28:33 crc kubenswrapper[4926]: I1125 18:28:33.579643 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr"] Nov 25 18:28:34 crc kubenswrapper[4926]: I1125 18:28:34.496130 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerStarted","Data":"f4f9abd001c7aed8a325ddab641ac304694b5d3366c4ffd4498eacfbd1a6f204"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.506019 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerStarted","Data":"5b31316a339a8ea4296a4d487e52b7fc4bd182d067b4b2f82f5e745cbec7b209"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.507265 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.508618 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerStarted","Data":"ca8da6f6a7513d8c23b4eb82db40d9aa8d27584618441719813ab7e73e9b91cf"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.510560 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerStarted","Data":"542db4b1b19a7189084fdb4090aa6a614b6e06472d66ac2adc3530835ef036e9"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.510763 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.512092 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.512535 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerStarted","Data":"3d36c6364d97b55cf9b72d177c1c258da09a68a79c726f97d2056622acc7e3e7"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.514150 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.514237 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerStarted","Data":"1d2c46b2c0c4becdf6ebfaa0c7ec73365c4c86260ffea8e38413613eb8b9780d"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.515583 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerStarted","Data":"e1d3f088781bd7dfd09d8535aaaf1600a8133c2755f2bcd4da6a4e1b27624997"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.517193 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerStarted","Data":"c6948f393a9574f110453f8fe0025337a9d98251266c7a3c248197499afd3203"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.517352 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.518949 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerStarted","Data":"ecc6d4b2eb53c97dc5e8ab35dc578963de0f064bbde6b2ca991b2fda87861bde"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.519740 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.521698 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerStarted","Data":"d7a4a33bf46e3abae7b7cac1ff8abd4092a64e6ecd5deed39bc40cab97e49788"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.522245 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.523601 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.523640 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.524251 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerStarted","Data":"2c4c2b3a5303c9a549e54819cfc40984bdce6ea47380f91082b6e6119479966d"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.524484 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.526063 4926 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.526261 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerStarted","Data":"47c924922d10883b90f594da3571f86ab8a202a4d5096188229f622a543eac96"} Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.535618 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podStartSLOduration=12.154227618 podStartE2EDuration="35.535590334s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.182198904 +0000 UTC m=+913.567712509" lastFinishedPulling="2025-11-25 18:28:26.56356162 +0000 UTC m=+936.949075225" observedRunningTime="2025-11-25 18:28:35.5333086 +0000 UTC m=+945.918822195" watchObservedRunningTime="2025-11-25 18:28:35.535590334 +0000 UTC m=+945.921103939" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.591856 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podStartSLOduration=12.266312021 podStartE2EDuration="35.591826626s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.197629032 +0000 UTC m=+913.583142637" lastFinishedPulling="2025-11-25 18:28:26.523143637 +0000 UTC m=+936.908657242" observedRunningTime="2025-11-25 18:28:35.587608369 +0000 UTC m=+945.973121984" watchObservedRunningTime="2025-11-25 18:28:35.591826626 +0000 UTC m=+945.977340221" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.622185 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podStartSLOduration=11.590690731 podStartE2EDuration="35.622158449s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.188758006 +0000 UTC m=+913.574271611" lastFinishedPulling="2025-11-25 18:28:27.220225724 +0000 UTC m=+937.605739329" observedRunningTime="2025-11-25 18:28:35.617461968 +0000 UTC m=+946.002975573" watchObservedRunningTime="2025-11-25 18:28:35.622158449 +0000 UTC m=+946.007672054" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.665737 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podStartSLOduration=10.482648129 podStartE2EDuration="34.665712808s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.19393606 +0000 UTC m=+913.579449665" lastFinishedPulling="2025-11-25 18:28:27.377000729 +0000 UTC m=+937.762514344" observedRunningTime="2025-11-25 18:28:35.659815225 +0000 UTC m=+946.045328850" watchObservedRunningTime="2025-11-25 18:28:35.665712808 +0000 UTC m=+946.051226403" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.702988 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podStartSLOduration=9.481250637 podStartE2EDuration="35.702958284s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:02.42041921 +0000 UTC m=+912.805932815" lastFinishedPulling="2025-11-25 
18:28:28.642126857 +0000 UTC m=+939.027640462" observedRunningTime="2025-11-25 18:28:35.700218638 +0000 UTC m=+946.085732243" watchObservedRunningTime="2025-11-25 18:28:35.702958284 +0000 UTC m=+946.088471899" Nov 25 18:28:35 crc kubenswrapper[4926]: I1125 18:28:35.735191 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podStartSLOduration=11.612029213 podStartE2EDuration="35.735172308s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.159312608 +0000 UTC m=+913.544826213" lastFinishedPulling="2025-11-25 18:28:27.282455703 +0000 UTC m=+937.667969308" observedRunningTime="2025-11-25 18:28:35.730237131 +0000 UTC m=+946.115750746" watchObservedRunningTime="2025-11-25 18:28:35.735172308 +0000 UTC m=+946.120685913" Nov 25 18:28:36 crc kubenswrapper[4926]: I1125 18:28:36.550783 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podStartSLOduration=10.720044872 podStartE2EDuration="36.550736816s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:02.725316721 +0000 UTC m=+913.110830326" lastFinishedPulling="2025-11-25 18:28:28.556008665 +0000 UTC m=+938.941522270" observedRunningTime="2025-11-25 18:28:36.549212995 +0000 UTC m=+946.934726600" watchObservedRunningTime="2025-11-25 18:28:36.550736816 +0000 UTC m=+946.936250431" Nov 25 18:28:36 crc kubenswrapper[4926]: I1125 18:28:36.573351 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podStartSLOduration=12.224610045 podStartE2EDuration="35.573324004s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.174689755 +0000 UTC m=+913.560203360" lastFinishedPulling="2025-11-25 18:28:26.523403714 +0000 UTC m=+936.908917319" observedRunningTime="2025-11-25 18:28:36.568351607 +0000 UTC m=+946.953865222" watchObservedRunningTime="2025-11-25 18:28:36.573324004 +0000 UTC m=+946.958837619" Nov 25 18:28:36 crc kubenswrapper[4926]: I1125 18:28:36.595014 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podStartSLOduration=11.449921883 podStartE2EDuration="35.594992396s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.163448173 +0000 UTC m=+913.548961778" lastFinishedPulling="2025-11-25 18:28:27.308518666 +0000 UTC m=+937.694032291" observedRunningTime="2025-11-25 18:28:36.587426516 +0000 UTC m=+946.972940121" watchObservedRunningTime="2025-11-25 18:28:36.594992396 +0000 UTC m=+946.980506001" Nov 25 18:28:36 crc kubenswrapper[4926]: I1125 18:28:36.623057 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podStartSLOduration=28.582830522 podStartE2EDuration="35.623029815s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:20.335896211 +0000 UTC m=+930.721409826" lastFinishedPulling="2025-11-25 18:28:27.376095504 +0000 UTC m=+937.761609119" observedRunningTime="2025-11-25 18:28:36.619628631 +0000 UTC m=+947.005142246" watchObservedRunningTime="2025-11-25 18:28:36.623029815 +0000 UTC m=+947.008543420" Nov 25 18:28:36 crc kubenswrapper[4926]: I1125 
18:28:36.641935 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podStartSLOduration=12.335028381 podStartE2EDuration="36.64190981s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.104713611 +0000 UTC m=+913.490227216" lastFinishedPulling="2025-11-25 18:28:27.41159503 +0000 UTC m=+937.797108645" observedRunningTime="2025-11-25 18:28:36.63976372 +0000 UTC m=+947.025277355" watchObservedRunningTime="2025-11-25 18:28:36.64190981 +0000 UTC m=+947.027423435" Nov 25 18:28:37 crc kubenswrapper[4926]: I1125 18:28:37.580559 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:37 crc kubenswrapper[4926]: I1125 18:28:37.587040 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 18:28:37 crc kubenswrapper[4926]: I1125 18:28:37.836307 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 18:28:40 crc kubenswrapper[4926]: I1125 18:28:40.565264 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerStarted","Data":"892cf2c781f958010bdc1bb34e3c650396e61eefe06323dd35850c78848bcfd5"} Nov 25 18:28:40 crc kubenswrapper[4926]: I1125 18:28:40.565766 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:40 crc kubenswrapper[4926]: I1125 18:28:40.567897 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerStarted","Data":"54dfc64f2d94f9d9ebb7fab0fe449487d912d8c416ffb0208c9e0889db721e1c"} Nov 25 18:28:40 crc kubenswrapper[4926]: I1125 18:28:40.568866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:40 crc kubenswrapper[4926]: I1125 18:28:40.591493 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podStartSLOduration=3.15965384 podStartE2EDuration="39.591467707s" podCreationTimestamp="2025-11-25 18:28:01 +0000 UTC" firstStartedPulling="2025-11-25 18:28:03.102444748 +0000 UTC m=+913.487958353" lastFinishedPulling="2025-11-25 18:28:39.534258615 +0000 UTC m=+949.919772220" observedRunningTime="2025-11-25 18:28:40.58473245 +0000 UTC m=+950.970246075" watchObservedRunningTime="2025-11-25 18:28:40.591467707 +0000 UTC m=+950.976981312" Nov 25 18:28:40 crc kubenswrapper[4926]: I1125 18:28:40.606988 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podStartSLOduration=3.489022497 podStartE2EDuration="40.606966937s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:02.416630425 +0000 UTC m=+912.802144030" lastFinishedPulling="2025-11-25 18:28:39.534574865 +0000 UTC m=+949.920088470" 
observedRunningTime="2025-11-25 18:28:40.606134675 +0000 UTC m=+950.991648280" watchObservedRunningTime="2025-11-25 18:28:40.606966937 +0000 UTC m=+950.992480542" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.055328 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.126608 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.129051 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.181312 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.183240 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.580803 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerStarted","Data":"a8dbfdfab9fcce93abab835c82ca1aaac1b9e0170b509614af499e274e96fadc"} Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.580912 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerStarted","Data":"008a9fccfa7f8d5af7cc3867df37579f65425476b19a4aad6da69932129086c4"} Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.600602 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podStartSLOduration=34.321810062 podStartE2EDuration="41.600577933s" podCreationTimestamp="2025-11-25 18:28:00 +0000 UTC" firstStartedPulling="2025-11-25 18:28:33.585884566 +0000 UTC m=+943.971398171" lastFinishedPulling="2025-11-25 18:28:40.864652437 +0000 UTC m=+951.250166042" observedRunningTime="2025-11-25 18:28:41.595763628 +0000 UTC m=+951.981277233" watchObservedRunningTime="2025-11-25 18:28:41.600577933 +0000 UTC m=+951.986091538" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.723460 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.726812 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.851159 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:41 crc kubenswrapper[4926]: I1125 18:28:41.854644 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 18:28:42 crc kubenswrapper[4926]: I1125 18:28:42.592363 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:28:51 crc kubenswrapper[4926]: I1125 18:28:51.097349 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 18:28:51 crc kubenswrapper[4926]: I1125 18:28:51.715603 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 18:28:53 crc kubenswrapper[4926]: I1125 18:28:53.051341 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.541442 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.542169 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.542232 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.543253 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a18d78481ed56ee9bbb8c78eb19b76e596d9f89645390e0fbbcd5362fea71a4"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.543340 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://2a18d78481ed56ee9bbb8c78eb19b76e596d9f89645390e0fbbcd5362fea71a4" gracePeriod=600 Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.782841 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="2a18d78481ed56ee9bbb8c78eb19b76e596d9f89645390e0fbbcd5362fea71a4" exitCode=0 Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.783452 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"2a18d78481ed56ee9bbb8c78eb19b76e596d9f89645390e0fbbcd5362fea71a4"} Nov 25 18:29:03 crc kubenswrapper[4926]: I1125 18:29:03.783555 4926 scope.go:117] "RemoveContainer" containerID="fe9604ac0593158a6b911432198090e6a5ba75c5f094643a6db976009ac5d9c3" Nov 25 18:29:04 crc kubenswrapper[4926]: I1125 18:29:04.797140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" 
event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"07a87485769ab3c18f3d0de8b8428276c4f53380d423cecc3238c93bfce01c6d"} Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.423037 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84c8bd76c9-rfsnl"] Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.426300 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.430266 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.430396 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.430647 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-rqwjb" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.430786 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.453893 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84c8bd76c9-rfsnl"] Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.478189 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f9f8f485-d9tv5"] Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.484359 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.503478 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.566589 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f9f8f485-d9tv5"] Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.605793 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-config\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.605859 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-dns-svc\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.605878 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkv6n\" (UniqueName: \"kubernetes.io/projected/a0b98179-3a67-4965-94c3-6192d4d3a135-kube-api-access-xkv6n\") pod \"dnsmasq-dns-84c8bd76c9-rfsnl\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") " pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.605901 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qncjk\" (UniqueName: \"kubernetes.io/projected/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-kube-api-access-qncjk\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " 
pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.605937 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b98179-3a67-4965-94c3-6192d4d3a135-config\") pod \"dnsmasq-dns-84c8bd76c9-rfsnl\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") " pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.707331 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b98179-3a67-4965-94c3-6192d4d3a135-config\") pod \"dnsmasq-dns-84c8bd76c9-rfsnl\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") " pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.707478 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-config\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.707521 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-dns-svc\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.707546 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkv6n\" (UniqueName: \"kubernetes.io/projected/a0b98179-3a67-4965-94c3-6192d4d3a135-kube-api-access-xkv6n\") pod \"dnsmasq-dns-84c8bd76c9-rfsnl\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") " pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.707575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qncjk\" (UniqueName: \"kubernetes.io/projected/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-kube-api-access-qncjk\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.708835 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-config\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.709067 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-dns-svc\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.709958 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b98179-3a67-4965-94c3-6192d4d3a135-config\") pod \"dnsmasq-dns-84c8bd76c9-rfsnl\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") " pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.730354 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qncjk\" (UniqueName: \"kubernetes.io/projected/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-kube-api-access-qncjk\") pod \"dnsmasq-dns-f9f8f485-d9tv5\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") " pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.730548 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkv6n\" (UniqueName: \"kubernetes.io/projected/a0b98179-3a67-4965-94c3-6192d4d3a135-kube-api-access-xkv6n\") pod \"dnsmasq-dns-84c8bd76c9-rfsnl\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") " pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.750143 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" Nov 25 18:29:11 crc kubenswrapper[4926]: I1125 18:29:11.835955 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" Nov 25 18:29:12 crc kubenswrapper[4926]: I1125 18:29:12.242861 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84c8bd76c9-rfsnl"] Nov 25 18:29:12 crc kubenswrapper[4926]: I1125 18:29:12.345539 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f9f8f485-d9tv5"] Nov 25 18:29:12 crc kubenswrapper[4926]: W1125 18:29:12.360881 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e5b1615_b574_4cc7_ac61_e6c3cd2aef0a.slice/crio-1c68211cc6d05595287ffe7579b5072132808669d7f7ef13e16acf56f7c1ddef WatchSource:0}: Error finding container 1c68211cc6d05595287ffe7579b5072132808669d7f7ef13e16acf56f7c1ddef: Status 404 returned error can't find the container with id 1c68211cc6d05595287ffe7579b5072132808669d7f7ef13e16acf56f7c1ddef Nov 25 18:29:12 crc kubenswrapper[4926]: I1125 18:29:12.870714 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" event={"ID":"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a","Type":"ContainerStarted","Data":"1c68211cc6d05595287ffe7579b5072132808669d7f7ef13e16acf56f7c1ddef"} Nov 25 18:29:12 crc kubenswrapper[4926]: I1125 18:29:12.873215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" event={"ID":"a0b98179-3a67-4965-94c3-6192d4d3a135","Type":"ContainerStarted","Data":"3dda8558b2573f887e289a71286be604a38cbd703237e5b395cc6a8a17582923"} Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.219291 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84c8bd76c9-rfsnl"] Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.269697 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fd4646b5-rk4lz"] Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.277741 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.301430 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fd4646b5-rk4lz"] Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.408457 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-config\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.408524 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55rrv\" (UniqueName: \"kubernetes.io/projected/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-kube-api-access-55rrv\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.408594 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-dns-svc\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.514113 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-dns-svc\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.515425 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-config\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.515454 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-dns-svc\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.515559 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55rrv\" (UniqueName: \"kubernetes.io/projected/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-kube-api-access-55rrv\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.516074 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-config\") pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.545195 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55rrv\" (UniqueName: \"kubernetes.io/projected/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-kube-api-access-55rrv\") 
pod \"dnsmasq-dns-fd4646b5-rk4lz\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") " pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.678132 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.725708 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f9f8f485-d9tv5"] Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.753342 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67df4d8c45-q7gpj"] Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.754646 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.781681 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67df4d8c45-q7gpj"] Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.821700 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-dns-svc\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.824665 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k4jr\" (UniqueName: \"kubernetes.io/projected/9dab0894-9d3e-48a2-a444-8da369542d2a-kube-api-access-2k4jr\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.824832 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-config\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.925844 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-config\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.925918 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-dns-svc\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.925949 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k4jr\" (UniqueName: \"kubernetes.io/projected/9dab0894-9d3e-48a2-a444-8da369542d2a-kube-api-access-2k4jr\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.927265 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-config\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.927822 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-dns-svc\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:15 crc kubenswrapper[4926]: I1125 18:29:15.956835 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k4jr\" (UniqueName: \"kubernetes.io/projected/9dab0894-9d3e-48a2-a444-8da369542d2a-kube-api-access-2k4jr\") pod \"dnsmasq-dns-67df4d8c45-q7gpj\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") " pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.082255 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67df4d8c45-q7gpj"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.082872 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.116627 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57fd94878f-v6npq"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.117987 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.132240 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-dns-svc\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.132287 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-config\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.132348 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhnmk\" (UniqueName: \"kubernetes.io/projected/9ced89d4-a75a-4c9b-b9e4-be11934799d3-kube-api-access-zhnmk\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.135984 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57fd94878f-v6npq"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.233985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-dns-svc\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.234519 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-config\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.234598 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhnmk\" (UniqueName: \"kubernetes.io/projected/9ced89d4-a75a-4c9b-b9e4-be11934799d3-kube-api-access-zhnmk\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.235111 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-dns-svc\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.235914 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-config\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.281881 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhnmk\" (UniqueName: \"kubernetes.io/projected/9ced89d4-a75a-4c9b-b9e4-be11934799d3-kube-api-access-zhnmk\") pod \"dnsmasq-dns-57fd94878f-v6npq\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.315881 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fd4646b5-rk4lz"] Nov 25 18:29:16 crc kubenswrapper[4926]: W1125 18:29:16.362105 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b1b48ad_7c6a_43a0_8c30_83575173ddaf.slice/crio-4a38a6626476968003e76f6d54ea7dfc46dde519cf3ceb72994b4eb21afdef55 WatchSource:0}: Error finding container 4a38a6626476968003e76f6d54ea7dfc46dde519cf3ceb72994b4eb21afdef55: Status 404 returned error can't find the container with id 4a38a6626476968003e76f6d54ea7dfc46dde519cf3ceb72994b4eb21afdef55 Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.483444 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.483444 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.485157 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.490637 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.490728 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.490855 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.490926 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.490974 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.491080 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.491089 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-rr2qd" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.517927 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.645013 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67df4d8c45-q7gpj"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.645833 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.645970 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/38e40083-2404-4c67-88b5-41ccaf693c6e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.646062 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.646120 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndt9m\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-kube-api-access-ndt9m\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.646140 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 
18:29:16.646183 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.646223 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/38e40083-2404-4c67-88b5-41ccaf693c6e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.646322 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.647961 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.648085 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.648137 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-config-data\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.749981 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750057 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndt9m\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-kube-api-access-ndt9m\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750090 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750124 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750166 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/38e40083-2404-4c67-88b5-41ccaf693c6e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750208 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750254 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750287 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750335 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-config-data\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750388 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.750431 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/38e40083-2404-4c67-88b5-41ccaf693c6e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.751567 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.751717 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.752269 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-config-data\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.752435 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.752794 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.753725 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.760775 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/38e40083-2404-4c67-88b5-41ccaf693c6e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.761608 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.767460 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndt9m\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-kube-api-access-ndt9m\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.775247 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.778329 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/38e40083-2404-4c67-88b5-41ccaf693c6e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.787530 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " pod="openstack/rabbitmq-server-0"
Nov 25 18:29:16 crc
kubenswrapper[4926]: I1125 18:29:16.823362 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.882526 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.884785 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.890162 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.891068 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-8294s" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.894648 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.896590 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.896723 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.896725 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.896740 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.903458 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.927690 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" event={"ID":"0b1b48ad-7c6a-43a0-8c30-83575173ddaf","Type":"ContainerStarted","Data":"4a38a6626476968003e76f6d54ea7dfc46dde519cf3ceb72994b4eb21afdef55"} Nov 25 18:29:16 crc kubenswrapper[4926]: I1125 18:29:16.931449 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" event={"ID":"9dab0894-9d3e-48a2-a444-8da369542d2a","Type":"ContainerStarted","Data":"9e37c52d80beb5fc5bb1ac7f70bbbb8b2b63ea545af9cef62b2d5cca5679a9ee"} Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.032312 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57fd94878f-v6npq"] Nov 25 18:29:17 crc kubenswrapper[4926]: W1125 18:29:17.051469 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ced89d4_a75a_4c9b_b9e4_be11934799d3.slice/crio-fca0e690ac622f5f1d6c91e4fdbde294af5746da157a68e0e4a31df6f1c2bca4 WatchSource:0}: Error finding container fca0e690ac622f5f1d6c91e4fdbde294af5746da157a68e0e4a31df6f1c2bca4: Status 404 returned error can't find the container with id fca0e690ac622f5f1d6c91e4fdbde294af5746da157a68e0e4a31df6f1c2bca4 Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058509 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e53ddff3-6cac-43f4-98c6-f909431098f1-pod-info\") pod 
\"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058585 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058620 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lppph\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-kube-api-access-lppph\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058755 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058798 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058840 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e53ddff3-6cac-43f4-98c6-f909431098f1-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058929 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.058969 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.059964 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.059997 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.060097 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162296 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e53ddff3-6cac-43f4-98c6-f909431098f1-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162351 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lppph\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-kube-api-access-lppph\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162492 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162510 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162536 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e53ddff3-6cac-43f4-98c6-f909431098f1-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162587 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 
18:29:17.162609 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162671 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162693 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162721 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.162997 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.163273 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.165069 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.165365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.165393 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.165760 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e53ddff3-6cac-43f4-98c6-f909431098f1-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.167135 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.168867 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e53ddff3-6cac-43f4-98c6-f909431098f1-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.169024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.169472 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e53ddff3-6cac-43f4-98c6-f909431098f1-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.191547 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lppph\" (UniqueName: \"kubernetes.io/projected/e53ddff3-6cac-43f4-98c6-f909431098f1-kube-api-access-lppph\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.203461 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"e53ddff3-6cac-43f4-98c6-f909431098f1\") " pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.247297 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.249139 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.251943 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.252336 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.252631 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.255148 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.255196 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.255657 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.260680 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-vvtzv" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.275893 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.347530 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366699 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8310425-a9bc-4c42-9caf-9c1a70041d2c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366766 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8310425-a9bc-4c42-9caf-9c1a70041d2c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366808 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366875 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366916 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366945 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.366967 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.367000 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.367416 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.367452 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb9r6\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-kube-api-access-xb9r6\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.367481 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.468855 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.468955 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.468993 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469012 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469088 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb9r6\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-kube-api-access-xb9r6\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469107 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469127 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8310425-a9bc-4c42-9caf-9c1a70041d2c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469164 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8310425-a9bc-4c42-9caf-9c1a70041d2c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469225 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469708 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.469949 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.471222 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.473053 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.473471 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.473606 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.474178 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8310425-a9bc-4c42-9caf-9c1a70041d2c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.474920 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.475121 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8310425-a9bc-4c42-9caf-9c1a70041d2c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.476879 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.496986 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb9r6\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-kube-api-access-xb9r6\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.509911 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.512737 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:17 crc kubenswrapper[4926]: I1125 18:29:17.587009 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.016351 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"38e40083-2404-4c67-88b5-41ccaf693c6e","Type":"ContainerStarted","Data":"c536dea4f99cf17d00fc396bf472640854de9a24d33b3c83d3273913009aa3cd"} Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.031643 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" event={"ID":"9ced89d4-a75a-4c9b-b9e4-be11934799d3","Type":"ContainerStarted","Data":"fca0e690ac622f5f1d6c91e4fdbde294af5746da157a68e0e4a31df6f1c2bca4"} Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.100965 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.191677 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:29:18 crc kubenswrapper[4926]: W1125 18:29:18.287718 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8310425_a9bc_4c42_9caf_9c1a70041d2c.slice/crio-fe9c0e0519b8553cbc9abb2edfecb2d4efc4f4d113213ecb0d36a645ce8213eb WatchSource:0}: Error finding container fe9c0e0519b8553cbc9abb2edfecb2d4efc4f4d113213ecb0d36a645ce8213eb: Status 404 returned error can't find the container with id fe9c0e0519b8553cbc9abb2edfecb2d4efc4f4d113213ecb0d36a645ce8213eb Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.625212 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.628734 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.631982 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.632159 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-q6cdz" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.632937 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.633090 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.643247 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.652981 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.810972 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04996f9-1035-4982-bd9b-f96ee30cd663-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811040 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811084 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-kolla-config\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811117 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04996f9-1035-4982-bd9b-f96ee30cd663-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811134 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-config-data-default\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811152 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b04996f9-1035-4982-bd9b-f96ee30cd663-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811176 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.811223 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6458\" (UniqueName: \"kubernetes.io/projected/b04996f9-1035-4982-bd9b-f96ee30cd663-kube-api-access-z6458\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.913792 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.913897 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6458\" (UniqueName: \"kubernetes.io/projected/b04996f9-1035-4982-bd9b-f96ee30cd663-kube-api-access-z6458\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.913960 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04996f9-1035-4982-bd9b-f96ee30cd663-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.913999 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.914063 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-kolla-config\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.914120 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04996f9-1035-4982-bd9b-f96ee30cd663-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.914142 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b04996f9-1035-4982-bd9b-f96ee30cd663-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.914159 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-config-data-default\") pod 
\"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.915761 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-config-data-default\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.916339 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.916865 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-kolla-config\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.917705 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b04996f9-1035-4982-bd9b-f96ee30cd663-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.923454 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b04996f9-1035-4982-bd9b-f96ee30cd663-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.941696 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04996f9-1035-4982-bd9b-f96ee30cd663-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.942830 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6458\" (UniqueName: \"kubernetes.io/projected/b04996f9-1035-4982-bd9b-f96ee30cd663-kube-api-access-z6458\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.946218 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04996f9-1035-4982-bd9b-f96ee30cd663-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:18 crc kubenswrapper[4926]: I1125 18:29:18.988325 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"b04996f9-1035-4982-bd9b-f96ee30cd663\") " pod="openstack/openstack-galera-0" Nov 25 18:29:19 crc kubenswrapper[4926]: I1125 18:29:19.072500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-notifications-server-0" event={"ID":"e53ddff3-6cac-43f4-98c6-f909431098f1","Type":"ContainerStarted","Data":"6948cdefabefda9de85a47bea17a3c8d71bb119b99eb5411b472e4b3fe4ff659"} Nov 25 18:29:19 crc kubenswrapper[4926]: I1125 18:29:19.079366 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8310425-a9bc-4c42-9caf-9c1a70041d2c","Type":"ContainerStarted","Data":"fe9c0e0519b8553cbc9abb2edfecb2d4efc4f4d113213ecb0d36a645ce8213eb"} Nov 25 18:29:19 crc kubenswrapper[4926]: I1125 18:29:19.254898 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 18:29:19 crc kubenswrapper[4926]: I1125 18:29:19.910778 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.022791 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.024583 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.027648 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.027910 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.028149 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-pktpc" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.029036 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.035222 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.107310 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b04996f9-1035-4982-bd9b-f96ee30cd663","Type":"ContainerStarted","Data":"4289ea96c2345e6602bb4eb53b5d647377156eddda4af1a8717039e6c2391f68"} Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182471 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac5169d3-6efd-4929-8f0a-b8cfae948182-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182552 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5169d3-6efd-4929-8f0a-b8cfae948182-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182586 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc 
kubenswrapper[4926]: I1125 18:29:20.182636 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182680 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182760 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182787 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ac5169d3-6efd-4929-8f0a-b8cfae948182-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.182880 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw5l4\" (UniqueName: \"kubernetes.io/projected/ac5169d3-6efd-4929-8f0a-b8cfae948182-kube-api-access-qw5l4\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284093 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284184 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284215 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ac5169d3-6efd-4929-8f0a-b8cfae948182-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " 
pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284259 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw5l4\" (UniqueName: \"kubernetes.io/projected/ac5169d3-6efd-4929-8f0a-b8cfae948182-kube-api-access-qw5l4\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284296 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac5169d3-6efd-4929-8f0a-b8cfae948182-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284336 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5169d3-6efd-4929-8f0a-b8cfae948182-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284366 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.284824 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.285394 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ac5169d3-6efd-4929-8f0a-b8cfae948182-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.286173 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.287315 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.287530 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac5169d3-6efd-4929-8f0a-b8cfae948182-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.303571 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac5169d3-6efd-4929-8f0a-b8cfae948182-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.303571 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5169d3-6efd-4929-8f0a-b8cfae948182-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.314240 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.338032 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw5l4\" (UniqueName: \"kubernetes.io/projected/ac5169d3-6efd-4929-8f0a-b8cfae948182-kube-api-access-qw5l4\") pod \"openstack-cell1-galera-0\" (UID: \"ac5169d3-6efd-4929-8f0a-b8cfae948182\") " pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.351448 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.413757 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.415568 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.422861 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.423091 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.423177 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-cj29j" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.443240 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.588972 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-config-data\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.589034 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.589085 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cv4k\" (UniqueName: \"kubernetes.io/projected/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-kube-api-access-9cv4k\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.589106 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.589139 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-kolla-config\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.692114 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-config-data\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.692178 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.692242 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cv4k\" (UniqueName: 
\"kubernetes.io/projected/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-kube-api-access-9cv4k\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.692263 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.692311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-kolla-config\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.693390 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-kolla-config\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.693899 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-config-data\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.712709 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.712716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.742206 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cv4k\" (UniqueName: \"kubernetes.io/projected/b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1-kube-api-access-9cv4k\") pod \"memcached-0\" (UID: \"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1\") " pod="openstack/memcached-0" Nov 25 18:29:20 crc kubenswrapper[4926]: I1125 18:29:20.764871 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.636184 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.638146 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.644941 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-4rp7p" Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.660521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96jtv\" (UniqueName: \"kubernetes.io/projected/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8-kube-api-access-96jtv\") pod \"kube-state-metrics-0\" (UID: \"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8\") " pod="openstack/kube-state-metrics-0" Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.661168 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.769892 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96jtv\" (UniqueName: \"kubernetes.io/projected/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8-kube-api-access-96jtv\") pod \"kube-state-metrics-0\" (UID: \"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8\") " pod="openstack/kube-state-metrics-0" Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.824665 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96jtv\" (UniqueName: \"kubernetes.io/projected/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8-kube-api-access-96jtv\") pod \"kube-state-metrics-0\" (UID: \"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8\") " pod="openstack/kube-state-metrics-0" Nov 25 18:29:22 crc kubenswrapper[4926]: I1125 18:29:22.988477 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.041658 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.044499 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.046823 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.047010 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.047216 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-9xpwm" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.047352 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.047505 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.052108 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.053604 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.111921 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112005 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112077 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112097 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112136 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112358 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbkn5\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-kube-api-access-fbkn5\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.112629 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214594 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214649 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbkn5\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-kube-api-access-fbkn5\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214690 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214844 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214891 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" 
Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214934 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.214956 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.219495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.225412 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.226392 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.229033 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.230895 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.231361 4926 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
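[annotation] The csi_attacher record above shows kubelet skipping the NodeStageVolume step because the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME node capability; the "MountVolume.MountDevice succeeded" entry that follows is the bookkeeping for that skipped stage (note the globalmount device path), after which MountVolume.SetUp (NodePublishVolume) publishes the volume into the pod. A minimal sketch of the PersistentVolume these events imply, assuming capacity, access mode, and reclaim policy — only the driver name and volume handle appear in the log:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc
spec:
  capacity:
    storage: 10Gi                          # assumed
  accessModes:
    - ReadWriteOnce                        # assumed
  persistentVolumeReclaimPolicy: Delete    # assumed
  csi:
    driver: kubevirt.io.hostpath-provisioner          # from the UniqueName in the log
    volumeHandle: pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc  # from the UniqueName in the log
[/annotation]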
Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.231434 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/967f74eafc5d0ef2758f2567e8e6584104bc92d0318f34ed949bfc88cba8d50f/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.234625 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.248818 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbkn5\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-kube-api-access-fbkn5\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.295233 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.429172 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.772487 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-wwqdd"] Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.773918 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.781048 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.782694 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-7gv72" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.786306 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.788762 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wwqdd"] Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.836998 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-run-ovn\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.837199 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-log-ovn\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.837255 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-run\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.837303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b840840b-ff6b-439b-b043-7afd451ca6e7-ovn-controller-tls-certs\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.837395 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b840840b-ff6b-439b-b043-7afd451ca6e7-scripts\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.837416 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-css6c\" (UniqueName: \"kubernetes.io/projected/b840840b-ff6b-439b-b043-7afd451ca6e7-kube-api-access-css6c\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.837471 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b840840b-ff6b-439b-b043-7afd451ca6e7-combined-ca-bundle\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.847644 4926 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-qd88n"] Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.852289 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.870509 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-qd88n"] Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941343 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-scripts\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941460 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-etc-ovs\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941499 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-run-ovn\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941523 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-lib\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941542 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-log\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941593 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-log-ovn\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941615 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxgkq\" (UniqueName: \"kubernetes.io/projected/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-kube-api-access-wxgkq\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941645 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-run\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941670 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b840840b-ff6b-439b-b043-7afd451ca6e7-ovn-controller-tls-certs\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941709 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b840840b-ff6b-439b-b043-7afd451ca6e7-scripts\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941736 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-css6c\" (UniqueName: \"kubernetes.io/projected/b840840b-ff6b-439b-b043-7afd451ca6e7-kube-api-access-css6c\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941770 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-run\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.941797 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b840840b-ff6b-439b-b043-7afd451ca6e7-combined-ca-bundle\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.943105 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-log-ovn\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.943268 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-run-ovn\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.943451 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b840840b-ff6b-439b-b043-7afd451ca6e7-var-run\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.946226 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b840840b-ff6b-439b-b043-7afd451ca6e7-scripts\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.959274 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b840840b-ff6b-439b-b043-7afd451ca6e7-ovn-controller-tls-certs\") pod \"ovn-controller-wwqdd\" (UID: 
\"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.967188 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b840840b-ff6b-439b-b043-7afd451ca6e7-combined-ca-bundle\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:24 crc kubenswrapper[4926]: I1125 18:29:24.977861 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-css6c\" (UniqueName: \"kubernetes.io/projected/b840840b-ff6b-439b-b043-7afd451ca6e7-kube-api-access-css6c\") pod \"ovn-controller-wwqdd\" (UID: \"b840840b-ff6b-439b-b043-7afd451ca6e7\") " pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044011 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-scripts\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044077 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-etc-ovs\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044118 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-lib\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044142 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-log\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044206 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxgkq\" (UniqueName: \"kubernetes.io/projected/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-kube-api-access-wxgkq\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044276 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-run\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044420 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-run\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044521 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-lib\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-lib\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044521 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-etc-ovs\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.044649 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-var-log\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.047305 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-scripts\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.061343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxgkq\" (UniqueName: \"kubernetes.io/projected/cc5c3159-fdbc-49d0-82e6-10ff2e855e5b-kube-api-access-wxgkq\") pod \"ovn-controller-ovs-qd88n\" (UID: \"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b\") " pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.107305 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wwqdd" Nov 25 18:29:25 crc kubenswrapper[4926]: I1125 18:29:25.191857 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.798921 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.800776 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.803889 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.804069 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.804223 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-gjqgn" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.804413 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.805019 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.816135 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987065 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/077c6b4b-7e98-469d-8ab4-48833073ec4c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987115 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987140 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b7p5\" (UniqueName: \"kubernetes.io/projected/077c6b4b-7e98-469d-8ab4-48833073ec4c-kube-api-access-6b7p5\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/077c6b4b-7e98-469d-8ab4-48833073ec4c-config\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987716 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987782 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987926 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:26 crc kubenswrapper[4926]: I1125 18:29:26.987975 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/077c6b4b-7e98-469d-8ab4-48833073ec4c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090118 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/077c6b4b-7e98-469d-8ab4-48833073ec4c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090176 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090202 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b7p5\" (UniqueName: \"kubernetes.io/projected/077c6b4b-7e98-469d-8ab4-48833073ec4c-kube-api-access-6b7p5\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090235 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/077c6b4b-7e98-469d-8ab4-48833073ec4c-config\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090285 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090304 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090334 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.090351 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/077c6b4b-7e98-469d-8ab4-48833073ec4c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 
18:29:27.090765 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.091551 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/077c6b4b-7e98-469d-8ab4-48833073ec4c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.091753 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/077c6b4b-7e98-469d-8ab4-48833073ec4c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.092004 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/077c6b4b-7e98-469d-8ab4-48833073ec4c-config\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.097720 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.098468 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.099112 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/077c6b4b-7e98-469d-8ab4-48833073ec4c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.120698 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b7p5\" (UniqueName: \"kubernetes.io/projected/077c6b4b-7e98-469d-8ab4-48833073ec4c-kube-api-access-6b7p5\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.143277 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"077c6b4b-7e98-469d-8ab4-48833073ec4c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:27 crc kubenswrapper[4926]: I1125 18:29:27.426250 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.256181 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.259051 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.272622 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.272800 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-tflgh" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.272915 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.273601 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.282342 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397471 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397552 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397680 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397705 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqd5r\" (UniqueName: \"kubernetes.io/projected/42262798-4ac0-4099-b0b9-1820f77802cc-kube-api-access-hqd5r\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397727 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/42262798-4ac0-4099-b0b9-1820f77802cc-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " 
pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397797 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42262798-4ac0-4099-b0b9-1820f77802cc-config\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.397834 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42262798-4ac0-4099-b0b9-1820f77802cc-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499248 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42262798-4ac0-4099-b0b9-1820f77802cc-config\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499330 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42262798-4ac0-4099-b0b9-1820f77802cc-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499413 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499477 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499538 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499596 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499632 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqd5r\" (UniqueName: \"kubernetes.io/projected/42262798-4ac0-4099-b0b9-1820f77802cc-kube-api-access-hqd5r\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.499660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/42262798-4ac0-4099-b0b9-1820f77802cc-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.500489 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42262798-4ac0-4099-b0b9-1820f77802cc-config\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.500542 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/42262798-4ac0-4099-b0b9-1820f77802cc-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.500976 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42262798-4ac0-4099-b0b9-1820f77802cc-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.501070 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.511875 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.528510 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.546522 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42262798-4ac0-4099-b0b9-1820f77802cc-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.550323 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqd5r\" (UniqueName: \"kubernetes.io/projected/42262798-4ac0-4099-b0b9-1820f77802cc-kube-api-access-hqd5r\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.570707 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"42262798-4ac0-4099-b0b9-1820f77802cc\") " pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:30 crc kubenswrapper[4926]: I1125 18:29:30.647828 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 18:29:47 crc kubenswrapper[4926]: E1125 18:29:47.918516 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-mariadb:watcher_latest" Nov 25 18:29:47 crc kubenswrapper[4926]: E1125 18:29:47.919441 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-mariadb:watcher_latest" Nov 25 18:29:47 crc kubenswrapper[4926]: E1125 18:29:47.919707 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:38.102.83.27:5001/podified-master-centos10/openstack-mariadb:watcher_latest,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z6458,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(b04996f9-1035-4982-bd9b-f96ee30cd663): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:29:47 crc kubenswrapper[4926]: E1125 18:29:47.922183 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" Nov 25 18:29:48 crc kubenswrapper[4926]: E1125 18:29:48.548784 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image 
\\\"38.102.83.27:5001/podified-master-centos10/openstack-mariadb:watcher_latest\\\"\"" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.805177 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.805611 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.805752 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lppph,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
rabbitmq-notifications-server-0_openstack(e53ddff3-6cac-43f4-98c6-f909431098f1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.807213 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-notifications-server-0" podUID="e53ddff3-6cac-43f4-98c6-f909431098f1" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.811764 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.811864 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.812116 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xb9r6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(c8310425-a9bc-4c42-9caf-9c1a70041d2c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.813303 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.815043 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.815078 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.815195 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 
/var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ndt9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(38e40083-2404-4c67-88b5-41ccaf693c6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:29:51 crc kubenswrapper[4926]: E1125 18:29:51.816457 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e"
Nov 25 18:29:52 crc kubenswrapper[4926]: E1125 18:29:52.584151 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c"
Nov 25 18:29:52 crc kubenswrapper[4926]: E1125 18:29:52.584199 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-server-0" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e"
Nov 25 18:29:52 crc kubenswrapper[4926]: E1125 18:29:52.589344 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-notifications-server-0" podUID="e53ddff3-6cac-43f4-98c6-f909431098f1"
Nov 25 18:29:56 crc kubenswrapper[4926]: I1125 18:29:56.350969 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 25 18:29:56 crc kubenswrapper[4926]: I1125 18:29:56.359660 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 18:29:56 crc kubenswrapper[4926]: I1125 18:29:56.365153 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 25 18:29:56 crc kubenswrapper[4926]: W1125 18:29:56.604150 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7b8348c_1f7d_41f0_9f36_29f6d84ef2d1.slice/crio-14a4b0e99ab3c1566781b823b3c27d89be4014ef2c149d236638460f8542eb7e WatchSource:0}: Error finding container 14a4b0e99ab3c1566781b823b3c27d89be4014ef2c149d236638460f8542eb7e: Status 404 returned error can't find the container with id 14a4b0e99ab3c1566781b823b3c27d89be4014ef2c149d236638460f8542eb7e
Nov 25 18:29:56 crc kubenswrapper[4926]: I1125 18:29:56.641975 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1","Type":"ContainerStarted","Data":"14a4b0e99ab3c1566781b823b3c27d89be4014ef2c149d236638460f8542eb7e"}
Nov 25 18:29:56 crc kubenswrapper[4926]: I1125 18:29:56.646228 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8","Type":"ContainerStarted","Data":"99bcbec0308442ba333870b02c84ff0ec32f2801a06df92278567b65a0fc8859"}
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.650625 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.650685 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.650861 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qncjk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-f9f8f485-d9tv5_openstack(7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:29:56 crc kubenswrapper[4926]: I1125 18:29:56.651276 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"077c6b4b-7e98-469d-8ab4-48833073ec4c","Type":"ContainerStarted","Data":"4092225cc1e4eabb3a9db0a2a425f036449aed6c9436cc72fdcfa2d4a4003a9f"}
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.653352 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" podUID="7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.671790 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.671869 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.672096 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-55rrv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-fd4646b5-rk4lz_openstack(0b1b48ad-7c6a-43a0-8c30-83575173ddaf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.676197 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.677863 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.677944 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.678101 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xkv6n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84c8bd76c9-rfsnl_openstack(a0b98179-3a67-4965-94c3-6192d4d3a135): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.679593 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" podUID="a0b98179-3a67-4965-94c3-6192d4d3a135"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.692416 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.692478 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.692602 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c6h54h5b5h8fh59chb8h657h5c6hfbhfh4h68fh5f7h9fhc7h594h8hc7h5bfh56ch5fbh688h5bch699h5f6h55fh564h64h5dfh5dch586h75q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2k4jr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-67df4d8c45-q7gpj_openstack(9dab0894-9d3e-48a2-a444-8da369542d2a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.693721 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" podUID="9dab0894-9d3e-48a2-a444-8da369542d2a"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.703454 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.703522 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.703860 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c7h56dh5cfh8bh54fhbbhf4h5b9hdch67fhd7h55fh55fh6ch9h548h54ch665h647h6h8fhd6h5dfh5cdh58bh577h66fh695h5fbh55h77h5fcq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zhnmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57fd94878f-v6npq_openstack(9ced89d4-a75a-4c9b-b9e4-be11934799d3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:29:56 crc kubenswrapper[4926]: E1125 18:29:56.705252 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3"
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.059715 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wwqdd"]
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.064685 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.210527 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-qd88n"]
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.272083 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.323365 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 18:29:57 crc kubenswrapper[4926]: W1125 18:29:57.341355 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42262798_4ac0_4099_b0b9_1820f77802cc.slice/crio-37adf9bcfe3b9338b252c7a4d2e6dcc3d1060325df0886f37261c006197799d4 WatchSource:0}: Error finding container 37adf9bcfe3b9338b252c7a4d2e6dcc3d1060325df0886f37261c006197799d4: Status 404 returned error can't find the container with id 37adf9bcfe3b9338b252c7a4d2e6dcc3d1060325df0886f37261c006197799d4
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.662922 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ac5169d3-6efd-4929-8f0a-b8cfae948182","Type":"ContainerStarted","Data":"d0a5f7f0bd9dbc2d8f495c37b7431ff0a25ac77223f6f3ab81aead683425837c"}
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.664038 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wwqdd" event={"ID":"b840840b-ff6b-439b-b043-7afd451ca6e7","Type":"ContainerStarted","Data":"a70286b825888c29938822a6c512849b328ea86c04f1dcf9a52421d259236a53"}
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.664898 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"42262798-4ac0-4099-b0b9-1820f77802cc","Type":"ContainerStarted","Data":"37adf9bcfe3b9338b252c7a4d2e6dcc3d1060325df0886f37261c006197799d4"}
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.665804 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerStarted","Data":"f6a12375ef3a563cbff524f973c0f17396432e8c666a63498ed727b7e35944bd"}
Nov 25 18:29:57 crc kubenswrapper[4926]: I1125 18:29:57.666734 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qd88n" event={"ID":"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b","Type":"ContainerStarted","Data":"ea8bd4ce72a0eb77cf14073864648ef14e7984bee3d31f5e6000f9380cf2ca4d"}
Nov 25 18:29:57 crc kubenswrapper[4926]: E1125 18:29:57.669629 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest\\\"\"" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf"
Nov 25 18:29:57 crc kubenswrapper[4926]: E1125 18:29:57.669661 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-neutron-server:watcher_latest\\\"\"" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.216727 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.219724 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.236035 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345353 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-config\") pod \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345547 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b98179-3a67-4965-94c3-6192d4d3a135-config\") pod \"a0b98179-3a67-4965-94c3-6192d4d3a135\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345581 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qncjk\" (UniqueName: \"kubernetes.io/projected/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-kube-api-access-qncjk\") pod \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345753 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-config\") pod \"9dab0894-9d3e-48a2-a444-8da369542d2a\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345790 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k4jr\" (UniqueName: \"kubernetes.io/projected/9dab0894-9d3e-48a2-a444-8da369542d2a-kube-api-access-2k4jr\") pod \"9dab0894-9d3e-48a2-a444-8da369542d2a\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345818 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-dns-svc\") pod \"9dab0894-9d3e-48a2-a444-8da369542d2a\" (UID: \"9dab0894-9d3e-48a2-a444-8da369542d2a\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345850 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkv6n\" (UniqueName: \"kubernetes.io/projected/a0b98179-3a67-4965-94c3-6192d4d3a135-kube-api-access-xkv6n\") pod \"a0b98179-3a67-4965-94c3-6192d4d3a135\" (UID: \"a0b98179-3a67-4965-94c3-6192d4d3a135\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.345907 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-dns-svc\") pod \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\" (UID: \"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a\") "
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.346321 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-config" (OuterVolumeSpecName: "config") pod "7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a" (UID: "7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.346566 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.346569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-config" (OuterVolumeSpecName: "config") pod "9dab0894-9d3e-48a2-a444-8da369542d2a" (UID: "9dab0894-9d3e-48a2-a444-8da369542d2a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.346953 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0b98179-3a67-4965-94c3-6192d4d3a135-config" (OuterVolumeSpecName: "config") pod "a0b98179-3a67-4965-94c3-6192d4d3a135" (UID: "a0b98179-3a67-4965-94c3-6192d4d3a135"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.348292 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9dab0894-9d3e-48a2-a444-8da369542d2a" (UID: "9dab0894-9d3e-48a2-a444-8da369542d2a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.348792 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a" (UID: "7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.353639 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0b98179-3a67-4965-94c3-6192d4d3a135-kube-api-access-xkv6n" (OuterVolumeSpecName: "kube-api-access-xkv6n") pod "a0b98179-3a67-4965-94c3-6192d4d3a135" (UID: "a0b98179-3a67-4965-94c3-6192d4d3a135"). InnerVolumeSpecName "kube-api-access-xkv6n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.354619 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dab0894-9d3e-48a2-a444-8da369542d2a-kube-api-access-2k4jr" (OuterVolumeSpecName: "kube-api-access-2k4jr") pod "9dab0894-9d3e-48a2-a444-8da369542d2a" (UID: "9dab0894-9d3e-48a2-a444-8da369542d2a"). InnerVolumeSpecName "kube-api-access-2k4jr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.362553 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-kube-api-access-qncjk" (OuterVolumeSpecName: "kube-api-access-qncjk") pod "7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a" (UID: "7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a"). InnerVolumeSpecName "kube-api-access-qncjk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449160 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qncjk\" (UniqueName: \"kubernetes.io/projected/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-kube-api-access-qncjk\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449207 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b98179-3a67-4965-94c3-6192d4d3a135-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449220 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449235 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k4jr\" (UniqueName: \"kubernetes.io/projected/9dab0894-9d3e-48a2-a444-8da369542d2a-kube-api-access-2k4jr\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449247 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dab0894-9d3e-48a2-a444-8da369542d2a-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449260 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkv6n\" (UniqueName: \"kubernetes.io/projected/a0b98179-3a67-4965-94c3-6192d4d3a135-kube-api-access-xkv6n\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.449271 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.677410 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.677447 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84c8bd76c9-rfsnl" event={"ID":"a0b98179-3a67-4965-94c3-6192d4d3a135","Type":"ContainerDied","Data":"3dda8558b2573f887e289a71286be604a38cbd703237e5b395cc6a8a17582923"}
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.679232 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj" event={"ID":"9dab0894-9d3e-48a2-a444-8da369542d2a","Type":"ContainerDied","Data":"9e37c52d80beb5fc5bb1ac7f70bbbb8b2b63ea545af9cef62b2d5cca5679a9ee"}
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.679445 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67df4d8c45-q7gpj"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.681768 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5"
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.681761 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f9f8f485-d9tv5" event={"ID":"7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a","Type":"ContainerDied","Data":"1c68211cc6d05595287ffe7579b5072132808669d7f7ef13e16acf56f7c1ddef"}
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.721170 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84c8bd76c9-rfsnl"]
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.725963 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84c8bd76c9-rfsnl"]
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.758103 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67df4d8c45-q7gpj"]
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.773185 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67df4d8c45-q7gpj"]
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.792609 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f9f8f485-d9tv5"]
Nov 25 18:29:58 crc kubenswrapper[4926]: I1125 18:29:58.798277 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f9f8f485-d9tv5"]
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.157289 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"]
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.159390 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.162522 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.162721 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.173236 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"]
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.289395 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tvfs\" (UniqueName: \"kubernetes.io/projected/136a79fc-cb97-4d69-8e29-8907348a37ce-kube-api-access-5tvfs\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.289448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136a79fc-cb97-4d69-8e29-8907348a37ce-config-volume\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.289512 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136a79fc-cb97-4d69-8e29-8907348a37ce-secret-volume\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.344465 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a" path="/var/lib/kubelet/pods/7e5b1615-b574-4cc7-ac61-e6c3cd2aef0a/volumes"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.344922 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dab0894-9d3e-48a2-a444-8da369542d2a" path="/var/lib/kubelet/pods/9dab0894-9d3e-48a2-a444-8da369542d2a/volumes"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.345438 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0b98179-3a67-4965-94c3-6192d4d3a135" path="/var/lib/kubelet/pods/a0b98179-3a67-4965-94c3-6192d4d3a135/volumes"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.390924 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tvfs\" (UniqueName: \"kubernetes.io/projected/136a79fc-cb97-4d69-8e29-8907348a37ce-kube-api-access-5tvfs\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.390983 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136a79fc-cb97-4d69-8e29-8907348a37ce-config-volume\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.391086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136a79fc-cb97-4d69-8e29-8907348a37ce-secret-volume\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.393960 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136a79fc-cb97-4d69-8e29-8907348a37ce-config-volume\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.414310 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136a79fc-cb97-4d69-8e29-8907348a37ce-secret-volume\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.419359 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tvfs\" (UniqueName: \"kubernetes.io/projected/136a79fc-cb97-4d69-8e29-8907348a37ce-kube-api-access-5tvfs\") pod \"collect-profiles-29401590-jbjjc\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:00 crc kubenswrapper[4926]: I1125 18:30:00.493951 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:02 crc kubenswrapper[4926]: I1125 18:30:02.556408 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"]
Nov 25 18:30:02 crc kubenswrapper[4926]: W1125 18:30:02.587160 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod136a79fc_cb97_4d69_8e29_8907348a37ce.slice/crio-99cd299a56ee0204fee5658f899b5563d27fc0beced3e8f5f350ee7450168297 WatchSource:0}: Error finding container 99cd299a56ee0204fee5658f899b5563d27fc0beced3e8f5f350ee7450168297: Status 404 returned error can't find the container with id 99cd299a56ee0204fee5658f899b5563d27fc0beced3e8f5f350ee7450168297
Nov 25 18:30:02 crc kubenswrapper[4926]: I1125 18:30:02.719482 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc" event={"ID":"136a79fc-cb97-4d69-8e29-8907348a37ce","Type":"ContainerStarted","Data":"99cd299a56ee0204fee5658f899b5563d27fc0beced3e8f5f350ee7450168297"}
Nov 25 18:30:03 crc kubenswrapper[4926]: I1125 18:30:03.735981 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1","Type":"ContainerStarted","Data":"f68b929e505811e96a72f7f85677798831c895ccc8f8fda5f2d0ad79bbc7a78f"}
Nov 25 18:30:03 crc kubenswrapper[4926]: I1125 18:30:03.737072 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 25 18:30:03 crc kubenswrapper[4926]: I1125 18:30:03.761982 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=38.853807886 podStartE2EDuration="43.761955563s" podCreationTimestamp="2025-11-25 18:29:20 +0000 UTC" firstStartedPulling="2025-11-25 18:29:56.608987016 +0000 UTC m=+1026.994500621" lastFinishedPulling="2025-11-25 18:30:01.517134693 +0000 UTC m=+1031.902648298" observedRunningTime="2025-11-25 18:30:03.756270535 +0000 UTC m=+1034.141784140" watchObservedRunningTime="2025-11-25 18:30:03.761955563 +0000 UTC m=+1034.147469178"
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.745623 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ac5169d3-6efd-4929-8f0a-b8cfae948182","Type":"ContainerStarted","Data":"c017427b65ba44c2079bdd71bc3ed9e2ee504d3d4e6dff488c9c5430624f9ef0"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.748312 4926 generic.go:334] "Generic (PLEG): container finished" podID="136a79fc-cb97-4d69-8e29-8907348a37ce" containerID="2a3f6ecd7a0fa20487992bf3ef3c6e3ef316c4a3dea291e98a577dcd7982d458" exitCode=0
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.748390 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc" event={"ID":"136a79fc-cb97-4d69-8e29-8907348a37ce","Type":"ContainerDied","Data":"2a3f6ecd7a0fa20487992bf3ef3c6e3ef316c4a3dea291e98a577dcd7982d458"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.749973 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wwqdd" event={"ID":"b840840b-ff6b-439b-b043-7afd451ca6e7","Type":"ContainerStarted","Data":"0f6ba8003a2eea86fbc0a32b0068d18e36ab50377112774da168a6e9fb5ad480"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.750203 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-wwqdd"
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.751253 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"42262798-4ac0-4099-b0b9-1820f77802cc","Type":"ContainerStarted","Data":"3f012d2a4346c30fb1a274740d5b570a48b5fbd18973478e9a141245118bf29c"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.753404 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8","Type":"ContainerStarted","Data":"7b5790e546b285a77b792d10763aefeb92245339fdec2cc6858f05ffbfc9ecca"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.753533 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.754945 4926 generic.go:334] "Generic (PLEG): container finished" podID="cc5c3159-fdbc-49d0-82e6-10ff2e855e5b" containerID="236eec0609e5357175bc3377ae37db937d4d3bcd427a53749c29c1599e9fb0b3" exitCode=0
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.754998 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qd88n" event={"ID":"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b","Type":"ContainerDied","Data":"236eec0609e5357175bc3377ae37db937d4d3bcd427a53749c29c1599e9fb0b3"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.756818 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"077c6b4b-7e98-469d-8ab4-48833073ec4c","Type":"ContainerStarted","Data":"18cc015434c4fd322bcbf750ece7afe1648488e7248963acbe0707f5c06a8e83"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.758726 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b04996f9-1035-4982-bd9b-f96ee30cd663","Type":"ContainerStarted","Data":"593d993183ad18fe49252cda910b3ed5a128daac5fa20808848f040334045ca6"}
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.931584 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-wwqdd" podStartSLOduration=35.879631786 podStartE2EDuration="40.931555979s" podCreationTimestamp="2025-11-25 18:29:24 +0000 UTC" firstStartedPulling="2025-11-25 18:29:57.076753953 +0000 UTC m=+1027.462267558" lastFinishedPulling="2025-11-25 18:30:02.128678146 +0000 UTC m=+1032.514191751" observedRunningTime="2025-11-25 18:30:04.880555164 +0000 UTC m=+1035.266068799" watchObservedRunningTime="2025-11-25 18:30:04.931555979 +0000 UTC m=+1035.317069584"
Nov 25 18:30:04 crc kubenswrapper[4926]: I1125 18:30:04.948727 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=36.232638382 podStartE2EDuration="42.948695024s" podCreationTimestamp="2025-11-25 18:29:22 +0000 UTC" firstStartedPulling="2025-11-25 18:29:56.603056412 +0000 UTC m=+1026.988570017" lastFinishedPulling="2025-11-25 18:30:03.319113064 +0000 UTC m=+1033.704626659" observedRunningTime="2025-11-25 18:30:04.944708881 +0000 UTC m=+1035.330222486" watchObservedRunningTime="2025-11-25 18:30:04.948695024 +0000 UTC m=+1035.334208639"
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.146691 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.209496 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tvfs\" (UniqueName: \"kubernetes.io/projected/136a79fc-cb97-4d69-8e29-8907348a37ce-kube-api-access-5tvfs\") pod \"136a79fc-cb97-4d69-8e29-8907348a37ce\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") "
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.209696 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136a79fc-cb97-4d69-8e29-8907348a37ce-secret-volume\") pod \"136a79fc-cb97-4d69-8e29-8907348a37ce\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") "
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.209762 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136a79fc-cb97-4d69-8e29-8907348a37ce-config-volume\") pod \"136a79fc-cb97-4d69-8e29-8907348a37ce\" (UID: \"136a79fc-cb97-4d69-8e29-8907348a37ce\") "
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.211674 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/136a79fc-cb97-4d69-8e29-8907348a37ce-config-volume" (OuterVolumeSpecName: "config-volume") pod "136a79fc-cb97-4d69-8e29-8907348a37ce" (UID: "136a79fc-cb97-4d69-8e29-8907348a37ce"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.216657 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/136a79fc-cb97-4d69-8e29-8907348a37ce-kube-api-access-5tvfs" (OuterVolumeSpecName: "kube-api-access-5tvfs") pod "136a79fc-cb97-4d69-8e29-8907348a37ce" (UID: "136a79fc-cb97-4d69-8e29-8907348a37ce"). InnerVolumeSpecName "kube-api-access-5tvfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.239730 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/136a79fc-cb97-4d69-8e29-8907348a37ce-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "136a79fc-cb97-4d69-8e29-8907348a37ce" (UID: "136a79fc-cb97-4d69-8e29-8907348a37ce"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.312207 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/136a79fc-cb97-4d69-8e29-8907348a37ce-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.312253 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tvfs\" (UniqueName: \"kubernetes.io/projected/136a79fc-cb97-4d69-8e29-8907348a37ce-kube-api-access-5tvfs\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.312264 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/136a79fc-cb97-4d69-8e29-8907348a37ce-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.788970 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc" event={"ID":"136a79fc-cb97-4d69-8e29-8907348a37ce","Type":"ContainerDied","Data":"99cd299a56ee0204fee5658f899b5563d27fc0beced3e8f5f350ee7450168297"}
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.789034 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.789039 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99cd299a56ee0204fee5658f899b5563d27fc0beced3e8f5f350ee7450168297"
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.795261 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerStarted","Data":"cbce76ede70b249c42a069ec335203867ae05f8ca403df80939ab682b263e607"}
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.798863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qd88n" event={"ID":"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b","Type":"ContainerStarted","Data":"5ca3a2e52f696adfaddcdb8cc9674b2e8b71f5f965d3ed2fcf07018be635569d"}
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.798893 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qd88n" event={"ID":"cc5c3159-fdbc-49d0-82e6-10ff2e855e5b","Type":"ContainerStarted","Data":"308828b824f3513dd944e946b05da7f168cd6618b8fa2d96baf9826c9974a54b"}
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.798985 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-qd88n"
Nov 25 18:30:06 crc kubenswrapper[4926]: I1125 18:30:06.799005 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-qd88n"
Nov 25 18:30:07 crc kubenswrapper[4926]: I1125 18:30:07.184912 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-qd88n" podStartSLOduration=38.646849053 podStartE2EDuration="43.184885151s" podCreationTimestamp="2025-11-25 18:29:24 +0000 UTC" firstStartedPulling="2025-11-25 18:29:57.346664898 +0000 UTC m=+1027.732178503" lastFinishedPulling="2025-11-25 18:30:01.884700956 +0000 UTC m=+1032.270214601" observedRunningTime="2025-11-25 18:30:06.839983607 +0000 UTC m=+1037.225497212" watchObservedRunningTime="2025-11-25 18:30:07.184885151 +0000 UTC m=+1037.570398756"
Nov 25 18:30:10 crc kubenswrapper[4926]: I1125 18:30:10.768223 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 25 18:30:10 crc kubenswrapper[4926]: I1125 18:30:10.840499 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8310425-a9bc-4c42-9caf-9c1a70041d2c","Type":"ContainerStarted","Data":"4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84"}
Nov 25 18:30:11 crc kubenswrapper[4926]: I1125 18:30:11.851863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"077c6b4b-7e98-469d-8ab4-48833073ec4c","Type":"ContainerStarted","Data":"73ea72f1c54d4e68dfae7845081ed19bc05f5ff77242487be1bf431a20ceb99c"}
Nov 25 18:30:11 crc kubenswrapper[4926]: I1125 18:30:11.853894 4926 generic.go:334] "Generic (PLEG): container finished" podID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerID="9f7041fcc70c1176081f25ce6e031efe523399be665bb51b2772135ed21d11e1" exitCode=0
Nov 25 18:30:11 crc kubenswrapper[4926]: I1125 18:30:11.853980 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" event={"ID":"9ced89d4-a75a-4c9b-b9e4-be11934799d3","Type":"ContainerDied","Data":"9f7041fcc70c1176081f25ce6e031efe523399be665bb51b2772135ed21d11e1"}
Nov 25 18:30:11 crc kubenswrapper[4926]: I1125 18:30:11.858320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"42262798-4ac0-4099-b0b9-1820f77802cc","Type":"ContainerStarted","Data":"2de8d27cd247bf0ae45a0a3e819e9886fc01788a21adc03141dcdb7de04a495d"}
Nov 25 18:30:11 crc kubenswrapper[4926]: I1125 18:30:11.888801 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=32.767313509 podStartE2EDuration="46.888772208s" podCreationTimestamp="2025-11-25 18:29:25 +0000 UTC" firstStartedPulling="2025-11-25 18:29:56.599017277 +0000 UTC m=+1026.984530872" lastFinishedPulling="2025-11-25 18:30:10.720475956 +0000 UTC m=+1041.105989571" observedRunningTime="2025-11-25 18:30:11.885940145 +0000 UTC m=+1042.271453770" watchObservedRunningTime="2025-11-25 18:30:11.888772208 +0000 UTC m=+1042.274285813"
Nov 25 18:30:11 crc kubenswrapper[4926]: I1125 18:30:11.945900 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=29.976431782 podStartE2EDuration="42.945875112s" podCreationTimestamp="2025-11-25 18:29:29 +0000 UTC" firstStartedPulling="2025-11-25 18:29:57.343983598 +0000 UTC m=+1027.729497203" lastFinishedPulling="2025-11-25 18:30:10.313426878 +0000 UTC m=+1040.698940533" observedRunningTime="2025-11-25 18:30:11.94116317 +0000 UTC m=+1042.326676785" watchObservedRunningTime="2025-11-25 18:30:11.945875112 +0000 UTC m=+1042.331388717"
Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.427470 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.427539 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.468859 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.649033 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.848825 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.867490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"38e40083-2404-4c67-88b5-41ccaf693c6e","Type":"ContainerStarted","Data":"1140a4cb17c37bbcea4321661bee910d6f09a5df0dc77e80ce7b4b99b90a2dc5"} Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.869418 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"e53ddff3-6cac-43f4-98c6-f909431098f1","Type":"ContainerStarted","Data":"03e7a04f31ba17233ccde141bd80aa68f099bc7c9d13f8f03a1e04d3465c09cb"} Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.873980 4926 generic.go:334] "Generic (PLEG): container finished" podID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerID="6d4ed31464a7bbced35e36d00065d5363eb11415c17604f22eeda9956c286913" exitCode=0 Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.874076 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" event={"ID":"0b1b48ad-7c6a-43a0-8c30-83575173ddaf","Type":"ContainerDied","Data":"6d4ed31464a7bbced35e36d00065d5363eb11415c17604f22eeda9956c286913"} Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.876822 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" event={"ID":"9ced89d4-a75a-4c9b-b9e4-be11934799d3","Type":"ContainerStarted","Data":"822c9601bd17b9f55843eeca3824c8ebe99d77f52edf57aa9144d0b7943e6d87"} Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.877174 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.877489 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.946989 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.980738 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 18:30:12 crc kubenswrapper[4926]: I1125 18:30:12.994815 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.013136 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" podStartSLOduration=3.319266394 podStartE2EDuration="57.013104828s" podCreationTimestamp="2025-11-25 18:29:16 +0000 UTC" firstStartedPulling="2025-11-25 18:29:17.054860445 +0000 UTC m=+987.440374050" lastFinishedPulling="2025-11-25 18:30:10.748698879 +0000 UTC m=+1041.134212484" observedRunningTime="2025-11-25 18:30:12.98312708 +0000 UTC m=+1043.368640685" watchObservedRunningTime="2025-11-25 18:30:13.013104828 +0000 UTC m=+1043.398618433" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.178078 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fd4646b5-rk4lz"] Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.218203 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7957cdccbf-jhg88"] Nov 25 18:30:13 crc kubenswrapper[4926]: E1125 18:30:13.218594 4926 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="136a79fc-cb97-4d69-8e29-8907348a37ce" containerName="collect-profiles" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.218616 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="136a79fc-cb97-4d69-8e29-8907348a37ce" containerName="collect-profiles" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.220346 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="136a79fc-cb97-4d69-8e29-8907348a37ce" containerName="collect-profiles" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.226427 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.240705 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7957cdccbf-jhg88"] Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.263107 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-dns-svc\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.263195 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shzn7\" (UniqueName: \"kubernetes.io/projected/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-kube-api-access-shzn7\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.263280 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-config\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.364585 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57fd94878f-v6npq"] Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.365471 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-config\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.365589 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-dns-svc\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.365623 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shzn7\" (UniqueName: \"kubernetes.io/projected/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-kube-api-access-shzn7\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.367151 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.367807 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-dns-svc\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.406629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shzn7\" (UniqueName: \"kubernetes.io/projected/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-kube-api-access-shzn7\") pod \"dnsmasq-dns-7957cdccbf-jhg88\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " pod="openstack/dnsmasq-dns-7957cdccbf-jhg88"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.430654 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-6mv24"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.431823 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.441789 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.448898 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-748bf9bd79-zt2dw"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.458438 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.464585 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467094 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7vj4\" (UniqueName: \"kubernetes.io/projected/3ef2a368-afb0-4109-a846-123a6d3a88e0-kube-api-access-m7vj4\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467296 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-ovn-rundir\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467546 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-ovsdbserver-sb\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467623 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-dns-svc\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467654 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-ovs-rundir\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467669 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-combined-ca-bundle\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467751 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-config\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467791 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvpdb\" (UniqueName: \"kubernetes.io/projected/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-kube-api-access-lvpdb\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-config\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.467915 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.469204 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-6mv24"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.505195 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-748bf9bd79-zt2dw"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.557163 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570198 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-config\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570265 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570332 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7vj4\" (UniqueName: \"kubernetes.io/projected/3ef2a368-afb0-4109-a846-123a6d3a88e0-kube-api-access-m7vj4\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570354 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-ovn-rundir\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570401 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-ovsdbserver-sb\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570422 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-dns-svc\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570439 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-ovs-rundir\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570457 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-combined-ca-bundle\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570486 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-config\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.570512 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvpdb\" (UniqueName: \"kubernetes.io/projected/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-kube-api-access-lvpdb\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.571699 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-config\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.572236 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-ovsdbserver-sb\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.572315 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-dns-svc\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.574128 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-ovs-rundir\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.577716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-config\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.578162 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-ovn-rundir\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.579893 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-combined-ca-bundle\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.585023 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.649173 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvpdb\" (UniqueName: \"kubernetes.io/projected/e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230-kube-api-access-lvpdb\") pod \"ovn-controller-metrics-6mv24\" (UID: \"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230\") " pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.685791 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7vj4\" (UniqueName: \"kubernetes.io/projected/3ef2a368-afb0-4109-a846-123a6d3a88e0-kube-api-access-m7vj4\") pod \"dnsmasq-dns-748bf9bd79-zt2dw\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.702917 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-748bf9bd79-zt2dw"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.703814 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.714277 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.717282 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.729796 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.740166 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.744278 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-sdwnd"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.744675 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.766625 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777275 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1544f9d3-aef4-4a8c-af9f-af4bea56f954-scripts\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777349 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdxdb\" (UniqueName: \"kubernetes.io/projected/1544f9d3-aef4-4a8c-af9f-af4bea56f954-kube-api-access-pdxdb\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777401 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777464 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1544f9d3-aef4-4a8c-af9f-af4bea56f954-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777498 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1544f9d3-aef4-4a8c-af9f-af4bea56f954-config\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.777555 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.792341 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-6mv24"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.801368 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84449c6c77-rhr8j"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.803274 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.806062 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.824870 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84449c6c77-rhr8j"]
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878779 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878832 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-nb\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878856 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-sb\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878906 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjh4d\" (UniqueName: \"kubernetes.io/projected/7196b83b-c064-4190-94f0-e314cd1a6f21-kube-api-access-hjh4d\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878955 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1544f9d3-aef4-4a8c-af9f-af4bea56f954-scripts\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878973 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdxdb\" (UniqueName: \"kubernetes.io/projected/1544f9d3-aef4-4a8c-af9f-af4bea56f954-kube-api-access-pdxdb\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.878995 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.879018 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-dns-svc\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.879051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1544f9d3-aef4-4a8c-af9f-af4bea56f954-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.879080 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.879104 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-config\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.879124 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1544f9d3-aef4-4a8c-af9f-af4bea56f954-config\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.880102 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1544f9d3-aef4-4a8c-af9f-af4bea56f954-config\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.882067 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1544f9d3-aef4-4a8c-af9f-af4bea56f954-scripts\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.882359 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1544f9d3-aef4-4a8c-af9f-af4bea56f954-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.886345 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.890067 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.895495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1544f9d3-aef4-4a8c-af9f-af4bea56f954-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.902015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdxdb\" (UniqueName: \"kubernetes.io/projected/1544f9d3-aef4-4a8c-af9f-af4bea56f954-kube-api-access-pdxdb\") pod \"ovn-northd-0\" (UID: \"1544f9d3-aef4-4a8c-af9f-af4bea56f954\") " pod="openstack/ovn-northd-0"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.902626 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" event={"ID":"0b1b48ad-7c6a-43a0-8c30-83575173ddaf","Type":"ContainerStarted","Data":"3870af6d4d054e7343f1f5bb997fe8f4904f2451d3eab09e3133dd410dff549a"}
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.904717 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerName="dnsmasq-dns" containerID="cri-o://3870af6d4d054e7343f1f5bb997fe8f4904f2451d3eab09e3133dd410dff549a" gracePeriod=10
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.934063 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" podStartSLOduration=-9223371977.920734 podStartE2EDuration="58.934041512s" podCreationTimestamp="2025-11-25 18:29:15 +0000 UTC" firstStartedPulling="2025-11-25 18:29:16.372334687 +0000 UTC m=+986.757848292" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:30:13.933524809 +0000 UTC m=+1044.319038434" watchObservedRunningTime="2025-11-25 18:30:13.934041512 +0000 UTC m=+1044.319555107"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.981397 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-config\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
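The absurd podStartSLOduration=-9223371977.920734 above is explained by lastFinishedPulling being the zero time ("0001-01-01 00:00:00 +0000 UTC"): in Go, time.Time.Sub saturates at the minimum time.Duration, and subtracting that saturated value then wraps around int64. The result works out to exactly minDuration + podStartE2EDuration, which matches the logged value. A minimal Go demonstration of the arithmetic:

```go
// Why the tracker logged -9223371977.920734s when lastFinishedPulling is the zero time.
package main

import (
	"fmt"
	"time"
)

func main() {
	var zero time.Time // 0001-01-01 00:00:00 +0000 UTC, as in the entry above
	firstPull := time.Date(2025, 11, 25, 18, 29, 16, 372334687, time.UTC)

	// time.Time.Sub saturates at the minimum time.Duration (about -292 years).
	pull := zero.Sub(firstPull)
	fmt.Println(pull == time.Duration(-1<<63)) // true

	// Subtracting the saturated value then wraps around int64,
	// yielding e2e + minDuration: the negative figure in the log.
	e2e := 58934041512 * time.Nanosecond // podStartE2EDuration 58.934041512s
	slo := e2e - pull
	fmt.Println(slo.Seconds()) // -9.223371977920734e+09
}
```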
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.981466 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-nb\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.981488 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-sb\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.981546 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjh4d\" (UniqueName: \"kubernetes.io/projected/7196b83b-c064-4190-94f0-e314cd1a6f21-kube-api-access-hjh4d\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.981631 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-dns-svc\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.982598 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-config\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.982754 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-dns-svc\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.983131 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-nb\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:13 crc kubenswrapper[4926]: I1125 18:30:13.984668 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-sb\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.008879 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjh4d\" (UniqueName: \"kubernetes.io/projected/7196b83b-c064-4190-94f0-e314cd1a6f21-kube-api-access-hjh4d\") pod \"dnsmasq-dns-84449c6c77-rhr8j\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.094563 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.156002 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.325441 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7957cdccbf-jhg88"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.362340 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.369520 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.371727 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.371963 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.372243 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.372454 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-n866l"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.474864 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.498262 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsjts\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-kube-api-access-xsjts\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.499150 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-cache\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.499206 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.499469 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.499556 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-lock\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.500785 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-748bf9bd79-zt2dw"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.603086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.603237 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-lock\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.603340 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsjts\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-kube-api-access-xsjts\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.603397 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-cache\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.603429 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: E1125 18:30:14.603421 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 18:30:14 crc kubenswrapper[4926]: E1125 18:30:14.603463 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 18:30:14 crc kubenswrapper[4926]: E1125 18:30:14.603560 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift podName:e1d8eee0-eb0b-41ad-b486-e7b20ffee29a nodeName:}" failed. No retries permitted until 2025-11-25 18:30:15.103526351 +0000 UTC m=+1045.489039956 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift") pod "swift-storage-0" (UID: "e1d8eee0-eb0b-41ad-b486-e7b20ffee29a") : configmap "swift-ring-files" not found
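The etc-swift failures that follow show the volume manager's doubling retry delay: durationBeforeRetry goes 500ms, then 1s, 2s, and 4s in the later entries, until the swift-ring-files configmap finally appears. A toy Go sketch of that retry shape (illustrative only; the 2-minute cap is an assumption for the sketch, not taken from this log):

```go
// Toy doubling-backoff retry mirroring the durationBeforeRetry sequence above.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New(`configmap "swift-ring-files" not found`)

// setUpEtcSwift stands in for the projected-volume SetUp that needs swift-ring-files.
func setUpEtcSwift() error { return errNotFound }

func main() {
	delay := 500 * time.Millisecond // initial durationBeforeRetry
	const maxDelay = 2 * time.Minute
	for attempt := 1; attempt <= 4; attempt++ {
		if err := setUpEtcSwift(); err != nil {
			fmt.Printf("attempt %d failed: %v; no retries permitted for %v\n", attempt, err, delay)
			time.Sleep(delay)
			delay *= 2 // 500ms -> 1s -> 2s -> 4s, as in the log
			if delay > maxDelay {
				delay = maxDelay
			}
			continue
		}
		break
	}
}
```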
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.603934 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.604531 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-lock\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.604568 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-cache\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.624342 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsjts\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-kube-api-access-xsjts\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.679851 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-6mv24"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.696685 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.935171 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.944898 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-6mv24" event={"ID":"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230","Type":"ContainerStarted","Data":"9f99f0dd7d81a7c616fc327241f3db9899d3362dd77b845165594bcda655297e"}
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.956388 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" event={"ID":"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec","Type":"ContainerStarted","Data":"0ed3982b563f3779e2853039b19196158359cf4754ef6d81f5201c4b5ae26d3c"}
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.962717 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerID="cbce76ede70b249c42a069ec335203867ae05f8ca403df80939ab682b263e607" exitCode=0
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.963019 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerDied","Data":"cbce76ede70b249c42a069ec335203867ae05f8ca403df80939ab682b263e607"}
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.969901 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84449c6c77-rhr8j"]
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.983114 4926 generic.go:334] "Generic (PLEG): container finished" podID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerID="3870af6d4d054e7343f1f5bb997fe8f4904f2451d3eab09e3133dd410dff549a" exitCode=0
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.983625 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" event={"ID":"0b1b48ad-7c6a-43a0-8c30-83575173ddaf","Type":"ContainerDied","Data":"3870af6d4d054e7343f1f5bb997fe8f4904f2451d3eab09e3133dd410dff549a"}
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.997185 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw" event={"ID":"3ef2a368-afb0-4109-a846-123a6d3a88e0","Type":"ContainerStarted","Data":"869fdbf0777e76f4b25ce2183b64ed7dfb380314606e8b00ade88e97840d6bce"}
Nov 25 18:30:14 crc kubenswrapper[4926]: I1125 18:30:14.998075 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerName="dnsmasq-dns" containerID="cri-o://822c9601bd17b9f55843eeca3824c8ebe99d77f52edf57aa9144d0b7943e6d87" gracePeriod=10
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.088194 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-4nt6x"]
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.089645 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.099594 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.099848 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.102739 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.128900 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-4nt6x"]
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.132277 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.132334 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-ring-data-devices\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.132455 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-swiftconf\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.133209 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-dispersionconf\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
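The "Killing container with a grace period" entries above (gracePeriod=10) follow the standard stop sequence: signal the container to terminate, wait up to the grace period, then force-kill. A small illustrative Go sketch of that shape, using a local process via os/exec as a stand-in for the CRI call (this is not kubelet's actual implementation):

```go
// SIGTERM, wait up to the grace period, then SIGKILL.
package main

import (
	"os/exec"
	"syscall"
	"time"
)

func killWithGracePeriod(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGTERM) // ask the process to stop
	select {
	case <-done: // exited within the grace period
	case <-time.After(grace):
		cmd.Process.Kill() // grace period expired: force kill
		<-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGracePeriod(cmd, 10*time.Second) // gracePeriod=10, as in the log
}
```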
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.133371 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfwf2\" (UniqueName: \"kubernetes.io/projected/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-kube-api-access-hfwf2\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.133494 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-etc-swift\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.133523 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-combined-ca-bundle\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.133553 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-scripts\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: E1125 18:30:15.134127 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 18:30:15 crc kubenswrapper[4926]: E1125 18:30:15.134164 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 18:30:15 crc kubenswrapper[4926]: E1125 18:30:15.134227 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift podName:e1d8eee0-eb0b-41ad-b486-e7b20ffee29a nodeName:}" failed. No retries permitted until 2025-11-25 18:30:16.134202332 +0000 UTC m=+1046.519715937 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift") pod "swift-storage-0" (UID: "e1d8eee0-eb0b-41ad-b486-e7b20ffee29a") : configmap "swift-ring-files" not found
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.236659 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-dispersionconf\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.237086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfwf2\" (UniqueName: \"kubernetes.io/projected/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-kube-api-access-hfwf2\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.237129 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-etc-swift\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.237146 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-combined-ca-bundle\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.237169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-scripts\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.237227 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-ring-data-devices\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.237269 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-swiftconf\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.238703 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-ring-data-devices\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.238784 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-scripts\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.239200 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-etc-swift\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.250726 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-combined-ca-bundle\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.250981 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-dispersionconf\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.252190 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-swiftconf\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.262016 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfwf2\" (UniqueName: \"kubernetes.io/projected/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-kube-api-access-hfwf2\") pod \"swift-ring-rebalance-4nt6x\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.396045 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.428944 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-4nt6x"
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.442261 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-config\") pod \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") "
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.442402 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-dns-svc\") pod \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") "
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.442487 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55rrv\" (UniqueName: \"kubernetes.io/projected/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-kube-api-access-55rrv\") pod \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\" (UID: \"0b1b48ad-7c6a-43a0-8c30-83575173ddaf\") "
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.506815 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-kube-api-access-55rrv" (OuterVolumeSpecName: "kube-api-access-55rrv") pod "0b1b48ad-7c6a-43a0-8c30-83575173ddaf" (UID: "0b1b48ad-7c6a-43a0-8c30-83575173ddaf"). InnerVolumeSpecName "kube-api-access-55rrv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.544816 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55rrv\" (UniqueName: \"kubernetes.io/projected/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-kube-api-access-55rrv\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.583056 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-config" (OuterVolumeSpecName: "config") pod "0b1b48ad-7c6a-43a0-8c30-83575173ddaf" (UID: "0b1b48ad-7c6a-43a0-8c30-83575173ddaf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.645611 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0b1b48ad-7c6a-43a0-8c30-83575173ddaf" (UID: "0b1b48ad-7c6a-43a0-8c30-83575173ddaf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.649809 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:15 crc kubenswrapper[4926]: I1125 18:30:15.649836 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b1b48ad-7c6a-43a0-8c30-83575173ddaf-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.006058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" event={"ID":"7196b83b-c064-4190-94f0-e314cd1a6f21","Type":"ContainerStarted","Data":"dbe38460d405937c937b1e7ece3f6c890a8490765848eaf95e011bac5b1b523c"}
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.011892 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1544f9d3-aef4-4a8c-af9f-af4bea56f954","Type":"ContainerStarted","Data":"8f27d10ba697d654deb67ac9147976cbeb9abc2c786b966bc923885625c9548f"}
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.018708 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz"
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.018656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd4646b5-rk4lz" event={"ID":"0b1b48ad-7c6a-43a0-8c30-83575173ddaf","Type":"ContainerDied","Data":"4a38a6626476968003e76f6d54ea7dfc46dde519cf3ceb72994b4eb21afdef55"}
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.019012 4926 scope.go:117] "RemoveContainer" containerID="3870af6d4d054e7343f1f5bb997fe8f4904f2451d3eab09e3133dd410dff549a"
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.021273 4926 generic.go:334] "Generic (PLEG): container finished" podID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerID="822c9601bd17b9f55843eeca3824c8ebe99d77f52edf57aa9144d0b7943e6d87" exitCode=0
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.021319 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" event={"ID":"9ced89d4-a75a-4c9b-b9e4-be11934799d3","Type":"ContainerDied","Data":"822c9601bd17b9f55843eeca3824c8ebe99d77f52edf57aa9144d0b7943e6d87"}
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.055185 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fd4646b5-rk4lz"]
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.055627 4926 scope.go:117] "RemoveContainer" containerID="6d4ed31464a7bbced35e36d00065d5363eb11415c17604f22eeda9956c286913"
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.062805 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fd4646b5-rk4lz"]
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.074898 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-4nt6x"]
Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.158710 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:16 crc kubenswrapper[4926]: E1125 18:30:16.159012 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 18:30:16 crc kubenswrapper[4926]: E1125 18:30:16.159057 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 18:30:16 crc kubenswrapper[4926]: E1125 18:30:16.159147 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift podName:e1d8eee0-eb0b-41ad-b486-e7b20ffee29a nodeName:}" failed. No retries permitted until 2025-11-25 18:30:18.159113569 +0000 UTC m=+1048.544627174 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift") pod "swift-storage-0" (UID: "e1d8eee0-eb0b-41ad-b486-e7b20ffee29a") : configmap "swift-ring-files" not found Nov 25 18:30:16 crc kubenswrapper[4926]: I1125 18:30:16.340422 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" path="/var/lib/kubelet/pods/0b1b48ad-7c6a-43a0-8c30-83575173ddaf/volumes" Nov 25 18:30:17 crc kubenswrapper[4926]: I1125 18:30:17.032581 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4nt6x" event={"ID":"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1","Type":"ContainerStarted","Data":"ad12e145917c62186469075bf0441eac2a65e43dfd8b098eafa40d00761b9b5f"} Nov 25 18:30:18 crc kubenswrapper[4926]: I1125 18:30:18.196640 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0" Nov 25 18:30:18 crc kubenswrapper[4926]: E1125 18:30:18.196811 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 18:30:18 crc kubenswrapper[4926]: E1125 18:30:18.197359 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 18:30:18 crc kubenswrapper[4926]: E1125 18:30:18.197514 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift podName:e1d8eee0-eb0b-41ad-b486-e7b20ffee29a nodeName:}" failed. No retries permitted until 2025-11-25 18:30:22.197484894 +0000 UTC m=+1052.582998509 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift") pod "swift-storage-0" (UID: "e1d8eee0-eb0b-41ad-b486-e7b20ffee29a") : configmap "swift-ring-files" not found Nov 25 18:30:19 crc kubenswrapper[4926]: I1125 18:30:19.974677 4926 util.go:48] "No ready sandbox for pod can be found. 
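
The two nestedpendingoperations entries above show the volume manager's retry policy: the first failed SetUp of etc-swift schedules a retry after 2s, the second after 4s, and the retries at 18:30:22 and 18:30:30 below back off to 8s and then 16s. A minimal Go sketch of that doubling, assuming an initial 2s delay; the initial value and the 2m2s cap here are illustrative assumptions, not a quote of kubelet's code:

package main

import (
	"fmt"
	"time"
)

// Doubling backoff as observed in this log: 2s, 4s, 8s, 16s, ...
// Initial delay and cap are assumptions for illustration.
func main() {
	delay := 2 * time.Second
	maxDelay := 2*time.Minute + 2*time.Second
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d fails, next retry in %v\n", attempt, delay)
		delay *= 2 // each failure doubles the wait
		if delay > maxDelay {
			delay = maxDelay // clamp once the cap is reached
		}
	}
}
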
Need to start a new one" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.045539 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-dns-svc\") pod \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.045708 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-config\") pod \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.045787 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhnmk\" (UniqueName: \"kubernetes.io/projected/9ced89d4-a75a-4c9b-b9e4-be11934799d3-kube-api-access-zhnmk\") pod \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\" (UID: \"9ced89d4-a75a-4c9b-b9e4-be11934799d3\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.083085 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ced89d4-a75a-4c9b-b9e4-be11934799d3-kube-api-access-zhnmk" (OuterVolumeSpecName: "kube-api-access-zhnmk") pod "9ced89d4-a75a-4c9b-b9e4-be11934799d3" (UID: "9ced89d4-a75a-4c9b-b9e4-be11934799d3"). InnerVolumeSpecName "kube-api-access-zhnmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.084165 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-6mv24" event={"ID":"e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230","Type":"ContainerStarted","Data":"9f1c5e9b9e7c290e5c1ebf4b0edfa00b742e1653d5cd63b16fa19550c70df477"} Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.102019 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ced89d4-a75a-4c9b-b9e4-be11934799d3" (UID: "9ced89d4-a75a-4c9b-b9e4-be11934799d3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.104051 4926 generic.go:334] "Generic (PLEG): container finished" podID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerID="bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81" exitCode=0 Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.105480 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" event={"ID":"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec","Type":"ContainerDied","Data":"bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81"} Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.109224 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" event={"ID":"9ced89d4-a75a-4c9b-b9e4-be11934799d3","Type":"ContainerDied","Data":"fca0e690ac622f5f1d6c91e4fdbde294af5746da157a68e0e4a31df6f1c2bca4"} Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.109285 4926 scope.go:117] "RemoveContainer" containerID="822c9601bd17b9f55843eeca3824c8ebe99d77f52edf57aa9144d0b7943e6d87" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.109447 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57fd94878f-v6npq" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.113338 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-6mv24" podStartSLOduration=7.113305574 podStartE2EDuration="7.113305574s" podCreationTimestamp="2025-11-25 18:30:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:30:20.109158385 +0000 UTC m=+1050.494671990" watchObservedRunningTime="2025-11-25 18:30:20.113305574 +0000 UTC m=+1050.498819179" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.126346 4926 generic.go:334] "Generic (PLEG): container finished" podID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerID="15a211cd75c99d1669ebc8271259274b7f0f4bbf8ba50c55317786f90abbf1ff" exitCode=0 Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.126605 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" event={"ID":"7196b83b-c064-4190-94f0-e314cd1a6f21","Type":"ContainerDied","Data":"15a211cd75c99d1669ebc8271259274b7f0f4bbf8ba50c55317786f90abbf1ff"} Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.144513 4926 generic.go:334] "Generic (PLEG): container finished" podID="3ef2a368-afb0-4109-a846-123a6d3a88e0" containerID="e01e32f8229c046af992c3b71e031783d2a440af79965f0c25890b24804db554" exitCode=0 Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.144925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw" event={"ID":"3ef2a368-afb0-4109-a846-123a6d3a88e0","Type":"ContainerDied","Data":"e01e32f8229c046af992c3b71e031783d2a440af79965f0c25890b24804db554"} Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.145172 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-config" (OuterVolumeSpecName: "config") pod "9ced89d4-a75a-4c9b-b9e4-be11934799d3" (UID: "9ced89d4-a75a-4c9b-b9e4-be11934799d3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.185972 4926 scope.go:117] "RemoveContainer" containerID="9f7041fcc70c1176081f25ce6e031efe523399be665bb51b2772135ed21d11e1" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.187321 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.187437 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ced89d4-a75a-4c9b-b9e4-be11934799d3-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.187504 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhnmk\" (UniqueName: \"kubernetes.io/projected/9ced89d4-a75a-4c9b-b9e4-be11934799d3-kube-api-access-zhnmk\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.475975 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57fd94878f-v6npq"] Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.484167 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57fd94878f-v6npq"] Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.787766 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.899725 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-ovsdbserver-sb\") pod \"3ef2a368-afb0-4109-a846-123a6d3a88e0\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.899801 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-config\") pod \"3ef2a368-afb0-4109-a846-123a6d3a88e0\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.899850 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7vj4\" (UniqueName: \"kubernetes.io/projected/3ef2a368-afb0-4109-a846-123a6d3a88e0-kube-api-access-m7vj4\") pod \"3ef2a368-afb0-4109-a846-123a6d3a88e0\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.899958 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-dns-svc\") pod \"3ef2a368-afb0-4109-a846-123a6d3a88e0\" (UID: \"3ef2a368-afb0-4109-a846-123a6d3a88e0\") " Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.906543 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ef2a368-afb0-4109-a846-123a6d3a88e0-kube-api-access-m7vj4" (OuterVolumeSpecName: "kube-api-access-m7vj4") pod "3ef2a368-afb0-4109-a846-123a6d3a88e0" (UID: "3ef2a368-afb0-4109-a846-123a6d3a88e0"). InnerVolumeSpecName "kube-api-access-m7vj4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.923496 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3ef2a368-afb0-4109-a846-123a6d3a88e0" (UID: "3ef2a368-afb0-4109-a846-123a6d3a88e0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.923517 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3ef2a368-afb0-4109-a846-123a6d3a88e0" (UID: "3ef2a368-afb0-4109-a846-123a6d3a88e0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:20 crc kubenswrapper[4926]: I1125 18:30:20.924437 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-config" (OuterVolumeSpecName: "config") pod "3ef2a368-afb0-4109-a846-123a6d3a88e0" (UID: "3ef2a368-afb0-4109-a846-123a6d3a88e0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.001957 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.002005 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.002016 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7vj4\" (UniqueName: \"kubernetes.io/projected/3ef2a368-afb0-4109-a846-123a6d3a88e0-kube-api-access-m7vj4\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.002027 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ef2a368-afb0-4109-a846-123a6d3a88e0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.153359 4926 generic.go:334] "Generic (PLEG): container finished" podID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerID="593d993183ad18fe49252cda910b3ed5a128daac5fa20808848f040334045ca6" exitCode=0 Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.153457 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b04996f9-1035-4982-bd9b-f96ee30cd663","Type":"ContainerDied","Data":"593d993183ad18fe49252cda910b3ed5a128daac5fa20808848f040334045ca6"} Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.156614 4926 generic.go:334] "Generic (PLEG): container finished" podID="ac5169d3-6efd-4929-8f0a-b8cfae948182" containerID="c017427b65ba44c2079bdd71bc3ed9e2ee504d3d4e6dff488c9c5430624f9ef0" exitCode=0 Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.156680 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ac5169d3-6efd-4929-8f0a-b8cfae948182","Type":"ContainerDied","Data":"c017427b65ba44c2079bdd71bc3ed9e2ee504d3d4e6dff488c9c5430624f9ef0"} Nov 25 18:30:21 crc 
kubenswrapper[4926]: I1125 18:30:21.159890 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw" event={"ID":"3ef2a368-afb0-4109-a846-123a6d3a88e0","Type":"ContainerDied","Data":"869fdbf0777e76f4b25ce2183b64ed7dfb380314606e8b00ade88e97840d6bce"} Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.159927 4926 scope.go:117] "RemoveContainer" containerID="e01e32f8229c046af992c3b71e031783d2a440af79965f0c25890b24804db554" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.159996 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-748bf9bd79-zt2dw" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.172604 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" event={"ID":"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec","Type":"ContainerStarted","Data":"769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7"} Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.172800 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.217937 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-748bf9bd79-zt2dw"] Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.223801 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-748bf9bd79-zt2dw"] Nov 25 18:30:21 crc kubenswrapper[4926]: I1125 18:30:21.237334 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" podStartSLOduration=8.237303495 podStartE2EDuration="8.237303495s" podCreationTimestamp="2025-11-25 18:30:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:30:21.229292596 +0000 UTC m=+1051.614806211" watchObservedRunningTime="2025-11-25 18:30:21.237303495 +0000 UTC m=+1051.622817110" Nov 25 18:30:22 crc kubenswrapper[4926]: I1125 18:30:22.185589 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" event={"ID":"7196b83b-c064-4190-94f0-e314cd1a6f21","Type":"ContainerStarted","Data":"5f135fff9b75d69b77abffdf1c4a8f93e747fc59df12bd3ab5d7a9a82ba4c26c"} Nov 25 18:30:22 crc kubenswrapper[4926]: I1125 18:30:22.185732 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" Nov 25 18:30:22 crc kubenswrapper[4926]: I1125 18:30:22.206633 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" podStartSLOduration=9.206610106 podStartE2EDuration="9.206610106s" podCreationTimestamp="2025-11-25 18:30:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:30:22.205422215 +0000 UTC m=+1052.590935820" watchObservedRunningTime="2025-11-25 18:30:22.206610106 +0000 UTC m=+1052.592123711" Nov 25 18:30:22 crc kubenswrapper[4926]: I1125 18:30:22.227847 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0" Nov 25 18:30:22 crc kubenswrapper[4926]: E1125 18:30:22.228180 4926 projected.go:288] 
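
The pod_startup_latency_tracker entries record two figures: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling), which is why the two coincide for pods like dnsmasq-dns-7957cdccbf-jhg88 whose pull timestamps are zero. A quick Go check of that arithmetic against the entry above; the field interpretation is inferred from the entries in this log, not quoted from kubelet documentation:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout uses Go's reference time; matches the timestamps printed in the log.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-25 18:30:13 +0000 UTC")
	running, _ := time.Parse(layout, "2025-11-25 18:30:21.237303495 +0000 UTC")
	// Prints 8.237303495s, matching podStartE2EDuration for dnsmasq-dns-7957cdccbf-jhg88.
	fmt.Println(running.Sub(created))
}
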
Nov 25 18:30:22 crc kubenswrapper[4926]: E1125 18:30:22.228180 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 18:30:22 crc kubenswrapper[4926]: E1125 18:30:22.228266 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 18:30:22 crc kubenswrapper[4926]: E1125 18:30:22.228423 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift podName:e1d8eee0-eb0b-41ad-b486-e7b20ffee29a nodeName:}" failed. No retries permitted until 2025-11-25 18:30:30.228395632 +0000 UTC m=+1060.613909237 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift") pod "swift-storage-0" (UID: "e1d8eee0-eb0b-41ad-b486-e7b20ffee29a") : configmap "swift-ring-files" not found
Nov 25 18:30:22 crc kubenswrapper[4926]: I1125 18:30:22.341777 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ef2a368-afb0-4109-a846-123a6d3a88e0" path="/var/lib/kubelet/pods/3ef2a368-afb0-4109-a846-123a6d3a88e0/volumes"
Nov 25 18:30:22 crc kubenswrapper[4926]: I1125 18:30:22.342474 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" path="/var/lib/kubelet/pods/9ced89d4-a75a-4c9b-b9e4-be11934799d3/volumes"
Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.196692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1544f9d3-aef4-4a8c-af9f-af4bea56f954","Type":"ContainerStarted","Data":"35c8d4c4afd893ecae15c6dcb615fce2f803b5b600cd7ed7f5a57360fa2d6310"}
Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.200083 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4nt6x" event={"ID":"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1","Type":"ContainerStarted","Data":"cecc7d931092c81dbc8f1d820ce4cb861a88cde9ff5f31e58e9c73372cfa1d1a"}
Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.216730 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b04996f9-1035-4982-bd9b-f96ee30cd663","Type":"ContainerStarted","Data":"fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9"}
Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.226086 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-4nt6x" podStartSLOduration=1.46084375 podStartE2EDuration="8.22606316s" podCreationTimestamp="2025-11-25 18:30:15 +0000 UTC" firstStartedPulling="2025-11-25 18:30:16.091791049 +0000 UTC m=+1046.477304654" lastFinishedPulling="2025-11-25 18:30:22.857010449 +0000 UTC m=+1053.242524064" observedRunningTime="2025-11-25 18:30:23.219274473 +0000 UTC m=+1053.604788098" watchObservedRunningTime="2025-11-25 18:30:23.22606316 +0000 UTC m=+1053.611576775"
Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.237946 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ac5169d3-6efd-4929-8f0a-b8cfae948182","Type":"ContainerStarted","Data":"d2e2948ff717be2aa08fd5a6972fe12c1467bb1485db619b57b3f6cb7136a3c1"}
Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.253791 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=23.941318613 podStartE2EDuration="1m6.25374952s" podCreationTimestamp="2025-11-25 18:29:17 +0000 UTC" firstStartedPulling="2025-11-25 18:29:19.935858518 +0000 UTC m=+990.321372123" lastFinishedPulling="2025-11-25 18:30:02.248289425 +0000 UTC m=+1032.633803030" observedRunningTime="2025-11-25 18:30:23.240881385 +0000 UTC m=+1053.626394990" watchObservedRunningTime="2025-11-25 18:30:23.25374952 +0000 UTC m=+1053.639263165"
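
swift-storage-0 stays unscheduled to start through all of this because its etc-swift projected volume sources a ConfigMap that does not exist yet; the swift-ring-rebalance-4nt6x job that just started is presumably what eventually publishes swift-ring-files. A hedged client-go sketch of the same lookup the projected-volume plugin keeps failing above; the kubeconfig path is an assumption for illustration:

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path is assumed; any admin kubeconfig for this cluster works.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/core/.kube/config")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same namespace/name the kubelet's projected.go is failing to resolve.
	cm, err := client.CoreV1().ConfigMaps("openstack").Get(context.TODO(), "swift-ring-files", metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		fmt.Println("configmap swift-ring-files not found; the etc-swift mount will keep backing off")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Printf("configmap present with %d keys; next mount retry should succeed\n", len(cm.Data))
}
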
18:29:17 +0000 UTC" firstStartedPulling="2025-11-25 18:29:19.935858518 +0000 UTC m=+990.321372123" lastFinishedPulling="2025-11-25 18:30:02.248289425 +0000 UTC m=+1032.633803030" observedRunningTime="2025-11-25 18:30:23.240881385 +0000 UTC m=+1053.626394990" watchObservedRunningTime="2025-11-25 18:30:23.25374952 +0000 UTC m=+1053.639263165" Nov 25 18:30:23 crc kubenswrapper[4926]: I1125 18:30:23.268696 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=61.101048985 podStartE2EDuration="1m5.268663597s" podCreationTimestamp="2025-11-25 18:29:18 +0000 UTC" firstStartedPulling="2025-11-25 18:29:57.349537882 +0000 UTC m=+1027.735051487" lastFinishedPulling="2025-11-25 18:30:01.517152484 +0000 UTC m=+1031.902666099" observedRunningTime="2025-11-25 18:30:23.262962519 +0000 UTC m=+1053.648476124" watchObservedRunningTime="2025-11-25 18:30:23.268663597 +0000 UTC m=+1053.654177202" Nov 25 18:30:24 crc kubenswrapper[4926]: I1125 18:30:24.254686 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1544f9d3-aef4-4a8c-af9f-af4bea56f954","Type":"ContainerStarted","Data":"6eb3287e407ff78f976903f92c38535fcdd8d3385d70fe7a2f58ed4d7ee31d80"} Nov 25 18:30:24 crc kubenswrapper[4926]: I1125 18:30:24.254858 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 18:30:24 crc kubenswrapper[4926]: I1125 18:30:24.274425 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.555627124 podStartE2EDuration="11.274404975s" podCreationTimestamp="2025-11-25 18:30:13 +0000 UTC" firstStartedPulling="2025-11-25 18:30:15.069556883 +0000 UTC m=+1045.455070488" lastFinishedPulling="2025-11-25 18:30:22.788334734 +0000 UTC m=+1053.173848339" observedRunningTime="2025-11-25 18:30:24.273299636 +0000 UTC m=+1054.658813371" watchObservedRunningTime="2025-11-25 18:30:24.274404975 +0000 UTC m=+1054.659918580" Nov 25 18:30:27 crc kubenswrapper[4926]: E1125 18:30:27.262697 4926 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.212:53514->38.102.83.212:46611: write tcp 38.102.83.212:53514->38.102.83.212:46611: write: broken pipe Nov 25 18:30:27 crc kubenswrapper[4926]: E1125 18:30:27.417105 4926 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.212:53528->38.102.83.212:46611: write tcp 38.102.83.212:53528->38.102.83.212:46611: write: broken pipe Nov 25 18:30:28 crc kubenswrapper[4926]: I1125 18:30:28.297716 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerStarted","Data":"10822e36f188fd5f5964ca79b7a37d1e98dfa52ec8a705e7affd95d599b78a0d"} Nov 25 18:30:28 crc kubenswrapper[4926]: I1125 18:30:28.558558 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.158667 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.255275 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.255332 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/openstack-galera-0" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.261904 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7957cdccbf-jhg88"] Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.311702 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerName="dnsmasq-dns" containerID="cri-o://769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7" gracePeriod=10 Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.513678 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.668156 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.796852 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.903690 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shzn7\" (UniqueName: \"kubernetes.io/projected/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-kube-api-access-shzn7\") pod \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.903833 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-config\") pod \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " Nov 25 18:30:29 crc kubenswrapper[4926]: I1125 18:30:29.903971 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-dns-svc\") pod \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\" (UID: \"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec\") " Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.073173 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-kube-api-access-shzn7" (OuterVolumeSpecName: "kube-api-access-shzn7") pod "ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" (UID: "ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec"). InnerVolumeSpecName "kube-api-access-shzn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.109223 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shzn7\" (UniqueName: \"kubernetes.io/projected/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-kube-api-access-shzn7\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.135692 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-config" (OuterVolumeSpecName: "config") pod "ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" (UID: "ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.144683 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" (UID: "ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.211095 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.211138 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.312861 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.313143 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.313183 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.313282 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift podName:e1d8eee0-eb0b-41ad-b486-e7b20ffee29a nodeName:}" failed. No retries permitted until 2025-11-25 18:30:46.313250667 +0000 UTC m=+1076.698764282 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift") pod "swift-storage-0" (UID: "e1d8eee0-eb0b-41ad-b486-e7b20ffee29a") : configmap "swift-ring-files" not found Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.322577 4926 generic.go:334] "Generic (PLEG): container finished" podID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerID="769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7" exitCode=0 Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.322632 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.322706 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" event={"ID":"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec","Type":"ContainerDied","Data":"769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7"} Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.322796 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7957cdccbf-jhg88" event={"ID":"ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec","Type":"ContainerDied","Data":"0ed3982b563f3779e2853039b19196158359cf4754ef6d81f5201c4b5ae26d3c"} Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.322828 4926 scope.go:117] "RemoveContainer" containerID="769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.352074 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.352122 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.353247 4926 scope.go:117] "RemoveContainer" containerID="bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.377184 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7957cdccbf-jhg88"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.383511 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7957cdccbf-jhg88"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.390462 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6c14-account-create-update-jvtzw"] Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391092 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391123 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391153 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ef2a368-afb0-4109-a846-123a6d3a88e0" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391163 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ef2a368-afb0-4109-a846-123a6d3a88e0" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391179 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391187 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391205 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391213 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391233 4926 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391241 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391266 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391277 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.391296 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391305 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391525 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ef2a368-afb0-4109-a846-123a6d3a88e0" containerName="init" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391590 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391620 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ced89d4-a75a-4c9b-b9e4-be11934799d3" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.391643 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b1b48ad-7c6a-43a0-8c30-83575173ddaf" containerName="dnsmasq-dns" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.392600 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.396612 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.399504 4926 scope.go:117] "RemoveContainer" containerID="769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.399632 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c14-account-create-update-jvtzw"] Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.400527 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7\": container with ID starting with 769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7 not found: ID does not exist" containerID="769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.400577 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7"} err="failed to get container status \"769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7\": rpc error: code = NotFound desc = could not find container \"769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7\": container with ID starting with 769ca2360524b4e729cf781515f52e8bc4ba56030c19f262d5a6a253c9fc19d7 not found: ID does not exist" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.400608 4926 scope.go:117] "RemoveContainer" containerID="bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81" Nov 25 18:30:30 crc kubenswrapper[4926]: E1125 18:30:30.403733 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81\": container with ID starting with bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81 not found: ID does not exist" containerID="bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.403767 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81"} err="failed to get container status \"bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81\": rpc error: code = NotFound desc = could not find container \"bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81\": container with ID starting with bfae4aeea4ac179fc4518a1f1c646b5004153802df98fb2ed760905c6755fb81 not found: ID does not exist" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.404363 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-vkrp7"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.405511 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.460688 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-vkrp7"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.516622 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzqsr\" (UniqueName: \"kubernetes.io/projected/5a669dd4-2149-4a89-bf40-6bbe36867139-kube-api-access-zzqsr\") pod \"keystone-6c14-account-create-update-jvtzw\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.516687 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bvc9\" (UniqueName: \"kubernetes.io/projected/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-kube-api-access-8bvc9\") pod \"keystone-db-create-vkrp7\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.516769 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-operator-scripts\") pod \"keystone-db-create-vkrp7\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.516795 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a669dd4-2149-4a89-bf40-6bbe36867139-operator-scripts\") pod \"keystone-6c14-account-create-update-jvtzw\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.534535 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.619617 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzqsr\" (UniqueName: \"kubernetes.io/projected/5a669dd4-2149-4a89-bf40-6bbe36867139-kube-api-access-zzqsr\") pod \"keystone-6c14-account-create-update-jvtzw\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.619704 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bvc9\" (UniqueName: \"kubernetes.io/projected/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-kube-api-access-8bvc9\") pod \"keystone-db-create-vkrp7\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.619804 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-operator-scripts\") pod \"keystone-db-create-vkrp7\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.619827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/5a669dd4-2149-4a89-bf40-6bbe36867139-operator-scripts\") pod \"keystone-6c14-account-create-update-jvtzw\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.620702 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a669dd4-2149-4a89-bf40-6bbe36867139-operator-scripts\") pod \"keystone-6c14-account-create-update-jvtzw\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.621886 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-operator-scripts\") pod \"keystone-db-create-vkrp7\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.656555 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bvc9\" (UniqueName: \"kubernetes.io/projected/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-kube-api-access-8bvc9\") pod \"keystone-db-create-vkrp7\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.657470 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzqsr\" (UniqueName: \"kubernetes.io/projected/5a669dd4-2149-4a89-bf40-6bbe36867139-kube-api-access-zzqsr\") pod \"keystone-6c14-account-create-update-jvtzw\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.735124 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-8vpf5"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.736316 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.750016 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-8vpf5"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.796213 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.807142 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.826298 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6btx\" (UniqueName: \"kubernetes.io/projected/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-kube-api-access-f6btx\") pod \"placement-db-create-8vpf5\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.826400 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-operator-scripts\") pod \"placement-db-create-8vpf5\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.875691 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-eebe-account-create-update-kwc6r"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.881462 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.884084 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.888599 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-eebe-account-create-update-kwc6r"] Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.928321 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6btx\" (UniqueName: \"kubernetes.io/projected/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-kube-api-access-f6btx\") pod \"placement-db-create-8vpf5\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.928385 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae582872-4d2a-423b-b1e4-96ae3533b44b-operator-scripts\") pod \"placement-eebe-account-create-update-kwc6r\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.928431 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-operator-scripts\") pod \"placement-db-create-8vpf5\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.928504 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mwkm\" (UniqueName: \"kubernetes.io/projected/ae582872-4d2a-423b-b1e4-96ae3533b44b-kube-api-access-6mwkm\") pod \"placement-eebe-account-create-update-kwc6r\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.929932 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-operator-scripts\") pod 
\"placement-db-create-8vpf5\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:30 crc kubenswrapper[4926]: I1125 18:30:30.949407 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6btx\" (UniqueName: \"kubernetes.io/projected/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-kube-api-access-f6btx\") pod \"placement-db-create-8vpf5\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.029633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mwkm\" (UniqueName: \"kubernetes.io/projected/ae582872-4d2a-423b-b1e4-96ae3533b44b-kube-api-access-6mwkm\") pod \"placement-eebe-account-create-update-kwc6r\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.030116 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae582872-4d2a-423b-b1e4-96ae3533b44b-operator-scripts\") pod \"placement-eebe-account-create-update-kwc6r\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.030955 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae582872-4d2a-423b-b1e4-96ae3533b44b-operator-scripts\") pod \"placement-eebe-account-create-update-kwc6r\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.047389 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mwkm\" (UniqueName: \"kubernetes.io/projected/ae582872-4d2a-423b-b1e4-96ae3533b44b-kube-api-access-6mwkm\") pod \"placement-eebe-account-create-update-kwc6r\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.064603 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.215696 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.334071 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c14-account-create-update-jvtzw"] Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.340485 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerStarted","Data":"4e8f747c818f9aeafc18233c444e1f4e4a84ea9ae8d9650f8c09d2f7276d0c0e"} Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.342468 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-vkrp7"] Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.461320 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.557464 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-8vpf5"] Nov 25 18:30:31 crc kubenswrapper[4926]: I1125 18:30:31.659182 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-eebe-account-create-update-kwc6r"] Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.341841 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec" path="/var/lib/kubelet/pods/ce4150ed-bdbc-4b73-b0e0-7c906b1d47ec/volumes" Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.347489 4926 generic.go:334] "Generic (PLEG): container finished" podID="3c2aa2ba-b60a-4fb5-9879-d825e4885c92" containerID="394cfee1be3b0cee00702e495ad12294915035ca76e1f4f40e977bf45f0a208c" exitCode=0 Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.347760 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vkrp7" event={"ID":"3c2aa2ba-b60a-4fb5-9879-d825e4885c92","Type":"ContainerDied","Data":"394cfee1be3b0cee00702e495ad12294915035ca76e1f4f40e977bf45f0a208c"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.347796 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vkrp7" event={"ID":"3c2aa2ba-b60a-4fb5-9879-d825e4885c92","Type":"ContainerStarted","Data":"ff69802a4c9cdb7cc396b8e5f779080263a2283603bf374b96d781e769ef2784"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.352268 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eebe-account-create-update-kwc6r" event={"ID":"ae582872-4d2a-423b-b1e4-96ae3533b44b","Type":"ContainerStarted","Data":"2062694dcc94a5dc24b7487b7cc566c9d3d1879cae1f70927b28183920fc0551"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.352315 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eebe-account-create-update-kwc6r" event={"ID":"ae582872-4d2a-423b-b1e4-96ae3533b44b","Type":"ContainerStarted","Data":"3da8b400ed018b9b0082167ddb4fa14d02bc44fb30a3a9c53d6bffe87749de64"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.353663 4926 generic.go:334] "Generic (PLEG): container finished" podID="5a669dd4-2149-4a89-bf40-6bbe36867139" containerID="5415f1164bfc2688bdafc9692d2efdf755869afa8b0a38fa316e7eb172a41a87" exitCode=0 Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.353781 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c14-account-create-update-jvtzw" 
event={"ID":"5a669dd4-2149-4a89-bf40-6bbe36867139","Type":"ContainerDied","Data":"5415f1164bfc2688bdafc9692d2efdf755869afa8b0a38fa316e7eb172a41a87"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.353815 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c14-account-create-update-jvtzw" event={"ID":"5a669dd4-2149-4a89-bf40-6bbe36867139","Type":"ContainerStarted","Data":"5e6507843790042f2cc19c47578a461102d83e6a0a3833a04d7dcb112a66b994"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.355482 4926 generic.go:334] "Generic (PLEG): container finished" podID="c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" containerID="cecc7d931092c81dbc8f1d820ce4cb861a88cde9ff5f31e58e9c73372cfa1d1a" exitCode=0 Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.355505 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4nt6x" event={"ID":"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1","Type":"ContainerDied","Data":"cecc7d931092c81dbc8f1d820ce4cb861a88cde9ff5f31e58e9c73372cfa1d1a"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.358336 4926 generic.go:334] "Generic (PLEG): container finished" podID="d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" containerID="9ed5f7826121be6ef64d31324c9f90d81e46ac1093557555b93d14a7ab18d815" exitCode=0 Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.358444 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-8vpf5" event={"ID":"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9","Type":"ContainerDied","Data":"9ed5f7826121be6ef64d31324c9f90d81e46ac1093557555b93d14a7ab18d815"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.358495 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-8vpf5" event={"ID":"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9","Type":"ContainerStarted","Data":"b8bacc50ce1eb5615dde45964ba02254d9b9d41770aac9f6b96fa35bce28c35b"} Nov 25 18:30:32 crc kubenswrapper[4926]: I1125 18:30:32.441231 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-eebe-account-create-update-kwc6r" podStartSLOduration=2.441204149 podStartE2EDuration="2.441204149s" podCreationTimestamp="2025-11-25 18:30:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:30:32.428751136 +0000 UTC m=+1062.814264751" watchObservedRunningTime="2025-11-25 18:30:32.441204149 +0000 UTC m=+1062.826717764" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.026958 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-r9q84"] Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.028670 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.034784 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-r9q84"] Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.173078 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-operator-scripts\") pod \"watcher-db-create-r9q84\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.173172 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4vj5\" (UniqueName: \"kubernetes.io/projected/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-kube-api-access-l4vj5\") pod \"watcher-db-create-r9q84\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.204690 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-de89-account-create-update-shz6t"] Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.206250 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.214973 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-de89-account-create-update-shz6t"] Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.216030 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.274739 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-operator-scripts\") pod \"watcher-db-create-r9q84\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.274837 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4vj5\" (UniqueName: \"kubernetes.io/projected/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-kube-api-access-l4vj5\") pod \"watcher-db-create-r9q84\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.275847 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-operator-scripts\") pod \"watcher-db-create-r9q84\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.310233 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4vj5\" (UniqueName: \"kubernetes.io/projected/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-kube-api-access-l4vj5\") pod \"watcher-db-create-r9q84\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.370112 4926 generic.go:334] "Generic (PLEG): container finished" podID="ae582872-4d2a-423b-b1e4-96ae3533b44b" containerID="2062694dcc94a5dc24b7487b7cc566c9d3d1879cae1f70927b28183920fc0551" exitCode=0 Nov 25 18:30:33 
crc kubenswrapper[4926]: I1125 18:30:33.370857 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eebe-account-create-update-kwc6r" event={"ID":"ae582872-4d2a-423b-b1e4-96ae3533b44b","Type":"ContainerDied","Data":"2062694dcc94a5dc24b7487b7cc566c9d3d1879cae1f70927b28183920fc0551"} Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.377308 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhm5n\" (UniqueName: \"kubernetes.io/projected/d389a676-44e5-4209-a6a7-752a361595ad-kube-api-access-qhm5n\") pod \"watcher-de89-account-create-update-shz6t\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.377472 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d389a676-44e5-4209-a6a7-752a361595ad-operator-scripts\") pod \"watcher-de89-account-create-update-shz6t\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.400429 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.479622 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d389a676-44e5-4209-a6a7-752a361595ad-operator-scripts\") pod \"watcher-de89-account-create-update-shz6t\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.479762 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhm5n\" (UniqueName: \"kubernetes.io/projected/d389a676-44e5-4209-a6a7-752a361595ad-kube-api-access-qhm5n\") pod \"watcher-de89-account-create-update-shz6t\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.481573 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d389a676-44e5-4209-a6a7-752a361595ad-operator-scripts\") pod \"watcher-de89-account-create-update-shz6t\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.499955 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhm5n\" (UniqueName: \"kubernetes.io/projected/d389a676-44e5-4209-a6a7-752a361595ad-kube-api-access-qhm5n\") pod \"watcher-de89-account-create-update-shz6t\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:33 crc kubenswrapper[4926]: I1125 18:30:33.522965 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:34 crc kubenswrapper[4926]: I1125 18:30:34.162077 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.149162 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wwqdd" podUID="b840840b-ff6b-439b-b043-7afd451ca6e7" containerName="ovn-controller" probeResult="failure" output=< Nov 25 18:30:35 crc kubenswrapper[4926]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 18:30:35 crc kubenswrapper[4926]: > Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.243402 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.436199 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c14-account-create-update-jvtzw" event={"ID":"5a669dd4-2149-4a89-bf40-6bbe36867139","Type":"ContainerDied","Data":"5e6507843790042f2cc19c47578a461102d83e6a0a3833a04d7dcb112a66b994"} Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.436642 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e6507843790042f2cc19c47578a461102d83e6a0a3833a04d7dcb112a66b994" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.438863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4nt6x" event={"ID":"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1","Type":"ContainerDied","Data":"ad12e145917c62186469075bf0441eac2a65e43dfd8b098eafa40d00761b9b5f"} Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.438913 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad12e145917c62186469075bf0441eac2a65e43dfd8b098eafa40d00761b9b5f" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.441505 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.442436 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-8vpf5" event={"ID":"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9","Type":"ContainerDied","Data":"b8bacc50ce1eb5615dde45964ba02254d9b9d41770aac9f6b96fa35bce28c35b"} Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.442488 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8bacc50ce1eb5615dde45964ba02254d9b9d41770aac9f6b96fa35bce28c35b" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.449859 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vkrp7" event={"ID":"3c2aa2ba-b60a-4fb5-9879-d825e4885c92","Type":"ContainerDied","Data":"ff69802a4c9cdb7cc396b8e5f779080263a2283603bf374b96d781e769ef2784"} Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.449922 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff69802a4c9cdb7cc396b8e5f779080263a2283603bf374b96d781e769ef2784" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.451444 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eebe-account-create-update-kwc6r" event={"ID":"ae582872-4d2a-423b-b1e4-96ae3533b44b","Type":"ContainerDied","Data":"3da8b400ed018b9b0082167ddb4fa14d02bc44fb30a3a9c53d6bffe87749de64"} Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.451517 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3da8b400ed018b9b0082167ddb4fa14d02bc44fb30a3a9c53d6bffe87749de64" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.463219 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-4nt6x" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.493232 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.499649 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.512132 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.518941 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-operator-scripts\") pod \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.519667 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" (UID: "d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.519950 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6btx\" (UniqueName: \"kubernetes.io/projected/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-kube-api-access-f6btx\") pod \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\" (UID: \"d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.520426 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.531035 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-kube-api-access-f6btx" (OuterVolumeSpecName: "kube-api-access-f6btx") pod "d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" (UID: "d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9"). InnerVolumeSpecName "kube-api-access-f6btx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621471 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzqsr\" (UniqueName: \"kubernetes.io/projected/5a669dd4-2149-4a89-bf40-6bbe36867139-kube-api-access-zzqsr\") pod \"5a669dd4-2149-4a89-bf40-6bbe36867139\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621643 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-ring-data-devices\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621700 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a669dd4-2149-4a89-bf40-6bbe36867139-operator-scripts\") pod \"5a669dd4-2149-4a89-bf40-6bbe36867139\" (UID: \"5a669dd4-2149-4a89-bf40-6bbe36867139\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621754 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-etc-swift\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621780 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfwf2\" (UniqueName: \"kubernetes.io/projected/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-kube-api-access-hfwf2\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621822 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-scripts\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621852 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-combined-ca-bundle\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621896 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae582872-4d2a-423b-b1e4-96ae3533b44b-operator-scripts\") pod \"ae582872-4d2a-423b-b1e4-96ae3533b44b\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621920 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-swiftconf\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.621980 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-operator-scripts\") pod \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622019 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bvc9\" (UniqueName: \"kubernetes.io/projected/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-kube-api-access-8bvc9\") pod \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\" (UID: \"3c2aa2ba-b60a-4fb5-9879-d825e4885c92\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622061 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-dispersionconf\") pod \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\" (UID: \"c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622106 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mwkm\" (UniqueName: \"kubernetes.io/projected/ae582872-4d2a-423b-b1e4-96ae3533b44b-kube-api-access-6mwkm\") pod \"ae582872-4d2a-423b-b1e4-96ae3533b44b\" (UID: \"ae582872-4d2a-423b-b1e4-96ae3533b44b\") " Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622582 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6btx\" (UniqueName: \"kubernetes.io/projected/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9-kube-api-access-f6btx\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622430 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622567 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a669dd4-2149-4a89-bf40-6bbe36867139-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5a669dd4-2149-4a89-bf40-6bbe36867139" (UID: "5a669dd4-2149-4a89-bf40-6bbe36867139"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622600 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae582872-4d2a-423b-b1e4-96ae3533b44b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae582872-4d2a-423b-b1e4-96ae3533b44b" (UID: "ae582872-4d2a-423b-b1e4-96ae3533b44b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.622651 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c2aa2ba-b60a-4fb5-9879-d825e4885c92" (UID: "3c2aa2ba-b60a-4fb5-9879-d825e4885c92"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.623432 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.627569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-kube-api-access-hfwf2" (OuterVolumeSpecName: "kube-api-access-hfwf2") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "kube-api-access-hfwf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.628281 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-kube-api-access-8bvc9" (OuterVolumeSpecName: "kube-api-access-8bvc9") pod "3c2aa2ba-b60a-4fb5-9879-d825e4885c92" (UID: "3c2aa2ba-b60a-4fb5-9879-d825e4885c92"). InnerVolumeSpecName "kube-api-access-8bvc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.629831 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a669dd4-2149-4a89-bf40-6bbe36867139-kube-api-access-zzqsr" (OuterVolumeSpecName: "kube-api-access-zzqsr") pod "5a669dd4-2149-4a89-bf40-6bbe36867139" (UID: "5a669dd4-2149-4a89-bf40-6bbe36867139"). InnerVolumeSpecName "kube-api-access-zzqsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.633842 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae582872-4d2a-423b-b1e4-96ae3533b44b-kube-api-access-6mwkm" (OuterVolumeSpecName: "kube-api-access-6mwkm") pod "ae582872-4d2a-423b-b1e4-96ae3533b44b" (UID: "ae582872-4d2a-423b-b1e4-96ae3533b44b"). InnerVolumeSpecName "kube-api-access-6mwkm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.638491 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.650996 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.660446 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-scripts" (OuterVolumeSpecName: "scripts") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.680023 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" (UID: "c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724480 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mwkm\" (UniqueName: \"kubernetes.io/projected/ae582872-4d2a-423b-b1e4-96ae3533b44b-kube-api-access-6mwkm\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724522 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzqsr\" (UniqueName: \"kubernetes.io/projected/5a669dd4-2149-4a89-bf40-6bbe36867139-kube-api-access-zzqsr\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724532 4926 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724541 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a669dd4-2149-4a89-bf40-6bbe36867139-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724550 4926 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724560 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfwf2\" (UniqueName: \"kubernetes.io/projected/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-kube-api-access-hfwf2\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724568 4926 reconciler_common.go:293] "Volume detached for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724576 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724584 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae582872-4d2a-423b-b1e4-96ae3533b44b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724593 4926 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724601 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724609 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bvc9\" (UniqueName: \"kubernetes.io/projected/3c2aa2ba-b60a-4fb5-9879-d825e4885c92-kube-api-access-8bvc9\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.724618 4926 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.835877 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-r9q84"] Nov 25 18:30:35 crc kubenswrapper[4926]: W1125 18:30:35.842905 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e59d2e8_b4ea_41c7_bab7_072e4090ccf6.slice/crio-d54be67559d74c7c2a814ec65c8178311270336c59b1a5e51119259e8c514943 WatchSource:0}: Error finding container d54be67559d74c7c2a814ec65c8178311270336c59b1a5e51119259e8c514943: Status 404 returned error can't find the container with id d54be67559d74c7c2a814ec65c8178311270336c59b1a5e51119259e8c514943 Nov 25 18:30:35 crc kubenswrapper[4926]: I1125 18:30:35.900080 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-de89-account-create-update-shz6t"] Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.470181 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerStarted","Data":"868fe4e7e4f75b0885aaa01f39fbeeaac678cc019ec3bb6ec37f6b74c8b40a06"} Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.485091 4926 generic.go:334] "Generic (PLEG): container finished" podID="0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" containerID="d04b8cec76de6e94aef1595226a3c2cb4e58304166c64c7ac3f11bbc6e855294" exitCode=0 Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.485187 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-r9q84" event={"ID":"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6","Type":"ContainerDied","Data":"d04b8cec76de6e94aef1595226a3c2cb4e58304166c64c7ac3f11bbc6e855294"} Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 
18:30:36.485225 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-r9q84" event={"ID":"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6","Type":"ContainerStarted","Data":"d54be67559d74c7c2a814ec65c8178311270336c59b1a5e51119259e8c514943"} Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487329 4926 generic.go:334] "Generic (PLEG): container finished" podID="d389a676-44e5-4209-a6a7-752a361595ad" containerID="71a79cba2f8eb38065b3fc1f681718e85f2cd5c82ff496380a84358f54b72c2b" exitCode=0 Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487496 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-4nt6x" Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487511 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vkrp7" Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487577 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-8vpf5" Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487495 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-de89-account-create-update-shz6t" event={"ID":"d389a676-44e5-4209-a6a7-752a361595ad","Type":"ContainerDied","Data":"71a79cba2f8eb38065b3fc1f681718e85f2cd5c82ff496380a84358f54b72c2b"} Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487655 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-de89-account-create-update-shz6t" event={"ID":"d389a676-44e5-4209-a6a7-752a361595ad","Type":"ContainerStarted","Data":"04a9784b9289c6ca4d65f46d59c9b6a374e93f47d1aab2cfacdb89ba090509cf"} Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487721 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6c14-account-create-update-jvtzw" Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.487817 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eebe-account-create-update-kwc6r" Nov 25 18:30:36 crc kubenswrapper[4926]: I1125 18:30:36.517480 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=35.755333797 podStartE2EDuration="1m14.517441685s" podCreationTimestamp="2025-11-25 18:29:22 +0000 UTC" firstStartedPulling="2025-11-25 18:29:57.079121504 +0000 UTC m=+1027.464635109" lastFinishedPulling="2025-11-25 18:30:35.841229382 +0000 UTC m=+1066.226742997" observedRunningTime="2025-11-25 18:30:36.501497512 +0000 UTC m=+1066.887011167" watchObservedRunningTime="2025-11-25 18:30:36.517441685 +0000 UTC m=+1066.902955310" Nov 25 18:30:37 crc kubenswrapper[4926]: I1125 18:30:37.923698 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:37 crc kubenswrapper[4926]: I1125 18:30:37.938160 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.069299 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhm5n\" (UniqueName: \"kubernetes.io/projected/d389a676-44e5-4209-a6a7-752a361595ad-kube-api-access-qhm5n\") pod \"d389a676-44e5-4209-a6a7-752a361595ad\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.069868 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4vj5\" (UniqueName: \"kubernetes.io/projected/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-kube-api-access-l4vj5\") pod \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.070058 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-operator-scripts\") pod \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\" (UID: \"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6\") " Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.070282 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d389a676-44e5-4209-a6a7-752a361595ad-operator-scripts\") pod \"d389a676-44e5-4209-a6a7-752a361595ad\" (UID: \"d389a676-44e5-4209-a6a7-752a361595ad\") " Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.070840 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" (UID: "0e59d2e8-b4ea-41c7-bab7-072e4090ccf6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.070884 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d389a676-44e5-4209-a6a7-752a361595ad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d389a676-44e5-4209-a6a7-752a361595ad" (UID: "d389a676-44e5-4209-a6a7-752a361595ad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.071576 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.071696 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d389a676-44e5-4209-a6a7-752a361595ad-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.076188 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-kube-api-access-l4vj5" (OuterVolumeSpecName: "kube-api-access-l4vj5") pod "0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" (UID: "0e59d2e8-b4ea-41c7-bab7-072e4090ccf6"). InnerVolumeSpecName "kube-api-access-l4vj5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.076420 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d389a676-44e5-4209-a6a7-752a361595ad-kube-api-access-qhm5n" (OuterVolumeSpecName: "kube-api-access-qhm5n") pod "d389a676-44e5-4209-a6a7-752a361595ad" (UID: "d389a676-44e5-4209-a6a7-752a361595ad"). InnerVolumeSpecName "kube-api-access-qhm5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.173326 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhm5n\" (UniqueName: \"kubernetes.io/projected/d389a676-44e5-4209-a6a7-752a361595ad-kube-api-access-qhm5n\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.173368 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4vj5\" (UniqueName: \"kubernetes.io/projected/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6-kube-api-access-l4vj5\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.514445 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-r9q84" event={"ID":"0e59d2e8-b4ea-41c7-bab7-072e4090ccf6","Type":"ContainerDied","Data":"d54be67559d74c7c2a814ec65c8178311270336c59b1a5e51119259e8c514943"} Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.514897 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d54be67559d74c7c2a814ec65c8178311270336c59b1a5e51119259e8c514943" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.514522 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-r9q84" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.516949 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-de89-account-create-update-shz6t" event={"ID":"d389a676-44e5-4209-a6a7-752a361595ad","Type":"ContainerDied","Data":"04a9784b9289c6ca4d65f46d59c9b6a374e93f47d1aab2cfacdb89ba090509cf"} Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.516990 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04a9784b9289c6ca4d65f46d59c9b6a374e93f47d1aab2cfacdb89ba090509cf" Nov 25 18:30:38 crc kubenswrapper[4926]: I1125 18:30:38.517067 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-de89-account-create-update-shz6t" Nov 25 18:30:39 crc kubenswrapper[4926]: I1125 18:30:39.430894 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 18:30:39 crc kubenswrapper[4926]: I1125 18:30:39.430965 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 18:30:39 crc kubenswrapper[4926]: I1125 18:30:39.435900 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 18:30:39 crc kubenswrapper[4926]: I1125 18:30:39.535067 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.167897 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wwqdd" podUID="b840840b-ff6b-439b-b043-7afd451ca6e7" containerName="ovn-controller" probeResult="failure" output=< Nov 25 18:30:40 crc kubenswrapper[4926]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 18:30:40 crc kubenswrapper[4926]: > Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.252683 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-qd88n" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.468922 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-wwqdd-config-t2srg"] Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469347 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a669dd4-2149-4a89-bf40-6bbe36867139" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469361 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a669dd4-2149-4a89-bf40-6bbe36867139" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469375 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469381 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469398 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469419 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469431 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d389a676-44e5-4209-a6a7-752a361595ad" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469439 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d389a676-44e5-4209-a6a7-752a361595ad" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469447 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" containerName="swift-ring-rebalance" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469453 4926 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" containerName="swift-ring-rebalance" Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469466 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c2aa2ba-b60a-4fb5-9879-d825e4885c92" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469471 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c2aa2ba-b60a-4fb5-9879-d825e4885c92" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: E1125 18:30:40.469485 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae582872-4d2a-423b-b1e4-96ae3533b44b" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469490 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae582872-4d2a-423b-b1e4-96ae3533b44b" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469682 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469696 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a669dd4-2149-4a89-bf40-6bbe36867139" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469712 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae582872-4d2a-423b-b1e4-96ae3533b44b" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469720 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469731 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d389a676-44e5-4209-a6a7-752a361595ad" containerName="mariadb-account-create-update" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469739 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1" containerName="swift-ring-rebalance" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.469748 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c2aa2ba-b60a-4fb5-9879-d825e4885c92" containerName="mariadb-database-create" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.470402 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.480225 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.495642 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wwqdd-config-t2srg"] Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.532494 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-log-ovn\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.532564 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run-ovn\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.532664 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.532820 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l48xw\" (UniqueName: \"kubernetes.io/projected/e619cb52-411b-4008-80dd-770b4c6f6006-kube-api-access-l48xw\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.532872 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-scripts\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.532927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-additional-scripts\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635129 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l48xw\" (UniqueName: 
\"kubernetes.io/projected/e619cb52-411b-4008-80dd-770b4c6f6006-kube-api-access-l48xw\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635158 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-scripts\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635203 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-additional-scripts\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635384 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-log-ovn\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635442 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run-ovn\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635488 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.635765 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-log-ovn\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.636053 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run-ovn\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.636228 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-additional-scripts\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.638309 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-scripts\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.659476 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l48xw\" (UniqueName: \"kubernetes.io/projected/e619cb52-411b-4008-80dd-770b4c6f6006-kube-api-access-l48xw\") pod \"ovn-controller-wwqdd-config-t2srg\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:40 crc kubenswrapper[4926]: I1125 18:30:40.833977 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:41 crc kubenswrapper[4926]: I1125 18:30:41.312100 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wwqdd-config-t2srg"] Nov 25 18:30:41 crc kubenswrapper[4926]: I1125 18:30:41.552402 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wwqdd-config-t2srg" event={"ID":"e619cb52-411b-4008-80dd-770b4c6f6006","Type":"ContainerStarted","Data":"5a3ee372d94e9a85f2b62c611078bbb7493a558e923cfdf5276e87254e55e505"} Nov 25 18:30:42 crc kubenswrapper[4926]: I1125 18:30:42.562198 4926 generic.go:334] "Generic (PLEG): container finished" podID="e619cb52-411b-4008-80dd-770b4c6f6006" containerID="625b53ae16a8f68d2bd7f99606e5fb62e2bbbf4051f296daeca969a2afcb0148" exitCode=0 Nov 25 18:30:42 crc kubenswrapper[4926]: I1125 18:30:42.562309 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wwqdd-config-t2srg" event={"ID":"e619cb52-411b-4008-80dd-770b4c6f6006","Type":"ContainerDied","Data":"625b53ae16a8f68d2bd7f99606e5fb62e2bbbf4051f296daeca969a2afcb0148"} Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.023000 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.023995 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="prometheus" containerID="cri-o://10822e36f188fd5f5964ca79b7a37d1e98dfa52ec8a705e7affd95d599b78a0d" gracePeriod=600 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.024034 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="thanos-sidecar" containerID="cri-o://868fe4e7e4f75b0885aaa01f39fbeeaac678cc019ec3bb6ec37f6b74c8b40a06" gracePeriod=600 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.024091 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="config-reloader" containerID="cri-o://4e8f747c818f9aeafc18233c444e1f4e4a84ea9ae8d9650f8c09d2f7276d0c0e" gracePeriod=600 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.581282 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerID="4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84" exitCode=0 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.581449 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"c8310425-a9bc-4c42-9caf-9c1a70041d2c","Type":"ContainerDied","Data":"4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84"} Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.597075 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerID="868fe4e7e4f75b0885aaa01f39fbeeaac678cc019ec3bb6ec37f6b74c8b40a06" exitCode=0 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.597146 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerID="4e8f747c818f9aeafc18233c444e1f4e4a84ea9ae8d9650f8c09d2f7276d0c0e" exitCode=0 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.597166 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerID="10822e36f188fd5f5964ca79b7a37d1e98dfa52ec8a705e7affd95d599b78a0d" exitCode=0 Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.597150 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerDied","Data":"868fe4e7e4f75b0885aaa01f39fbeeaac678cc019ec3bb6ec37f6b74c8b40a06"} Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.597311 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerDied","Data":"4e8f747c818f9aeafc18233c444e1f4e4a84ea9ae8d9650f8c09d2f7276d0c0e"} Nov 25 18:30:43 crc kubenswrapper[4926]: I1125 18:30:43.597442 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerDied","Data":"10822e36f188fd5f5964ca79b7a37d1e98dfa52ec8a705e7affd95d599b78a0d"} Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.000682 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wwqdd-config-t2srg" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.056724 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127311 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127413 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-prometheus-metric-storage-rulefiles-0\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127457 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-additional-scripts\") pod \"e619cb52-411b-4008-80dd-770b4c6f6006\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127484 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbkn5\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-kube-api-access-fbkn5\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127511 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127606 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run\") pod \"e619cb52-411b-4008-80dd-770b4c6f6006\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127632 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-tls-assets\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127737 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l48xw\" (UniqueName: \"kubernetes.io/projected/e619cb52-411b-4008-80dd-770b4c6f6006-kube-api-access-l48xw\") pod \"e619cb52-411b-4008-80dd-770b4c6f6006\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127815 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-log-ovn\") pod \"e619cb52-411b-4008-80dd-770b4c6f6006\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127856 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: 
\"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-thanos-prometheus-http-client-file\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127903 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-scripts\") pod \"e619cb52-411b-4008-80dd-770b4c6f6006\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127935 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-web-config\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127967 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run-ovn\") pod \"e619cb52-411b-4008-80dd-770b4c6f6006\" (UID: \"e619cb52-411b-4008-80dd-770b4c6f6006\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.127998 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config-out\") pod \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\" (UID: \"b2b632d4-1a26-46d8-bba4-ff02c365a9d2\") " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.136037 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config-out" (OuterVolumeSpecName: "config-out") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.136435 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.136951 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e619cb52-411b-4008-80dd-770b4c6f6006" (UID: "e619cb52-411b-4008-80dd-770b4c6f6006"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.140567 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e619cb52-411b-4008-80dd-770b4c6f6006" (UID: "e619cb52-411b-4008-80dd-770b4c6f6006"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.140976 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run" (OuterVolumeSpecName: "var-run") pod "e619cb52-411b-4008-80dd-770b4c6f6006" (UID: "e619cb52-411b-4008-80dd-770b4c6f6006"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.141059 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.143603 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e619cb52-411b-4008-80dd-770b4c6f6006" (UID: "e619cb52-411b-4008-80dd-770b4c6f6006"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.143886 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.147802 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config" (OuterVolumeSpecName: "config") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.148707 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-scripts" (OuterVolumeSpecName: "scripts") pod "e619cb52-411b-4008-80dd-770b4c6f6006" (UID: "e619cb52-411b-4008-80dd-770b4c6f6006"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.159182 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e619cb52-411b-4008-80dd-770b4c6f6006-kube-api-access-l48xw" (OuterVolumeSpecName: "kube-api-access-l48xw") pod "e619cb52-411b-4008-80dd-770b4c6f6006" (UID: "e619cb52-411b-4008-80dd-770b4c6f6006"). InnerVolumeSpecName "kube-api-access-l48xw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.159305 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-kube-api-access-fbkn5" (OuterVolumeSpecName: "kube-api-access-fbkn5") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "kube-api-access-fbkn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.162134 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-web-config" (OuterVolumeSpecName: "web-config") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.207435 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "b2b632d4-1a26-46d8-bba4-ff02c365a9d2" (UID: "b2b632d4-1a26-46d8-bba4-ff02c365a9d2"). InnerVolumeSpecName "pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230224 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") on node \"crc\" " Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230259 4926 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230271 4926 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230281 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbkn5\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-kube-api-access-fbkn5\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230294 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230311 4926 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230320 4926 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230328 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l48xw\" (UniqueName: 
\"kubernetes.io/projected/e619cb52-411b-4008-80dd-770b4c6f6006-kube-api-access-l48xw\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230335 4926 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230343 4926 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230351 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e619cb52-411b-4008-80dd-770b4c6f6006-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230359 4926 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-web-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230368 4926 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e619cb52-411b-4008-80dd-770b4c6f6006-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.230379 4926 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b2b632d4-1a26-46d8-bba4-ff02c365a9d2-config-out\") on node \"crc\" DevicePath \"\"" Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.253774 4926 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.253962 4926 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc") on node "crc"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.331438 4926 reconciler_common.go:293] "Volume detached for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") on node \"crc\" DevicePath \"\""
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.611623 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8310425-a9bc-4c42-9caf-9c1a70041d2c","Type":"ContainerStarted","Data":"12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989"}
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.613477 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.615150 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wwqdd-config-t2srg" event={"ID":"e619cb52-411b-4008-80dd-770b4c6f6006","Type":"ContainerDied","Data":"5a3ee372d94e9a85f2b62c611078bbb7493a558e923cfdf5276e87254e55e505"}
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.615188 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a3ee372d94e9a85f2b62c611078bbb7493a558e923cfdf5276e87254e55e505"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.615311 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wwqdd-config-t2srg"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.618484 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b2b632d4-1a26-46d8-bba4-ff02c365a9d2","Type":"ContainerDied","Data":"f6a12375ef3a563cbff524f973c0f17396432e8c666a63498ed727b7e35944bd"}
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.618566 4926 scope.go:117] "RemoveContainer" containerID="868fe4e7e4f75b0885aaa01f39fbeeaac678cc019ec3bb6ec37f6b74c8b40a06"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.618605 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.643188 4926 scope.go:117] "RemoveContainer" containerID="4e8f747c818f9aeafc18233c444e1f4e4a84ea9ae8d9650f8c09d2f7276d0c0e"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.653824 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.198702516 podStartE2EDuration="1m28.653794709s" podCreationTimestamp="2025-11-25 18:29:16 +0000 UTC" firstStartedPulling="2025-11-25 18:29:18.324905212 +0000 UTC m=+988.710418817" lastFinishedPulling="2025-11-25 18:30:08.779997415 +0000 UTC m=+1039.165511010" observedRunningTime="2025-11-25 18:30:44.644019815 +0000 UTC m=+1075.029533520" watchObservedRunningTime="2025-11-25 18:30:44.653794709 +0000 UTC m=+1075.039308324"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.678342 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.679664 4926 scope.go:117] "RemoveContainer" containerID="10822e36f188fd5f5964ca79b7a37d1e98dfa52ec8a705e7affd95d599b78a0d"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.686718 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.727311 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 18:30:44 crc kubenswrapper[4926]: E1125 18:30:44.728208 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="thanos-sidecar"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.728429 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="thanos-sidecar"
Nov 25 18:30:44 crc kubenswrapper[4926]: E1125 18:30:44.728583 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="init-config-reloader"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.728693 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="init-config-reloader"
Nov 25 18:30:44 crc kubenswrapper[4926]: E1125 18:30:44.728822 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e619cb52-411b-4008-80dd-770b4c6f6006" containerName="ovn-config"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.728937 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e619cb52-411b-4008-80dd-770b4c6f6006" containerName="ovn-config"
Nov 25 18:30:44 crc kubenswrapper[4926]: E1125 18:30:44.729067 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="config-reloader"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.729180 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="config-reloader"
Nov 25 18:30:44 crc kubenswrapper[4926]: E1125 18:30:44.729920 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="prometheus"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.730056 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="prometheus"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.730614 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="config-reloader"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.730797 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e619cb52-411b-4008-80dd-770b4c6f6006" containerName="ovn-config"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.730938 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="thanos-sidecar"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.731062 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" containerName="prometheus"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.734691 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.744085 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.744276 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.744205 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.744744 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.744864 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-9xpwm"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.744826 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.750500 4926 scope.go:117] "RemoveContainer" containerID="cbce76ede70b249c42a069ec335203867ae05f8ca403df80939ab682b263e607"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.757959 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.762336 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.848080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.848137 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.848178 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.849768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.849835 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48wrh\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-kube-api-access-48wrh\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.849890 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.850056 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ca984ebe-e3bd-4720-9718-4cb972ee65e5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.850091 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.850150 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.850193 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.850247 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.951963 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952013 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48wrh\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-kube-api-access-48wrh\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952049 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952119 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ca984ebe-e3bd-4720-9718-4cb972ee65e5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952149 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952183 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952209 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952241 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952280 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952303 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.952342 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.953887 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ca984ebe-e3bd-4720-9718-4cb972ee65e5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.957337 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.957734 4926 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.957778 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/967f74eafc5d0ef2758f2567e8e6584104bc92d0318f34ed949bfc88cba8d50f/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.957943 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.958299 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.959751 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.961647 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.962202 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.965918 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.966632 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.975131 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48wrh\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-kube-api-access-48wrh\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:44 crc kubenswrapper[4926]: I1125 18:30:44.995298 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.122531 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-wwqdd-config-t2srg"]
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.140736 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-wwqdd-config-t2srg"]
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.152433 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-wwqdd"
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.191960 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.632199 4926 generic.go:334] "Generic (PLEG): container finished" podID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerID="1140a4cb17c37bbcea4321661bee910d6f09a5df0dc77e80ce7b4b99b90a2dc5" exitCode=0
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.632269 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"38e40083-2404-4c67-88b5-41ccaf693c6e","Type":"ContainerDied","Data":"1140a4cb17c37bbcea4321661bee910d6f09a5df0dc77e80ce7b4b99b90a2dc5"}
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.635312 4926 generic.go:334] "Generic (PLEG): container finished" podID="e53ddff3-6cac-43f4-98c6-f909431098f1" containerID="03e7a04f31ba17233ccde141bd80aa68f099bc7c9d13f8f03a1e04d3465c09cb" exitCode=0
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.635451 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"e53ddff3-6cac-43f4-98c6-f909431098f1","Type":"ContainerDied","Data":"03e7a04f31ba17233ccde141bd80aa68f099bc7c9d13f8f03a1e04d3465c09cb"}
Nov 25 18:30:45 crc kubenswrapper[4926]: I1125 18:30:45.691480 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 18:30:46 crc kubenswrapper[4926]: I1125 18:30:46.343223 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2b632d4-1a26-46d8-bba4-ff02c365a9d2" path="/var/lib/kubelet/pods/b2b632d4-1a26-46d8-bba4-ff02c365a9d2/volumes"
Nov 25 18:30:46 crc kubenswrapper[4926]: I1125 18:30:46.344949 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e619cb52-411b-4008-80dd-770b4c6f6006" path="/var/lib/kubelet/pods/e619cb52-411b-4008-80dd-770b4c6f6006/volumes"
Nov 25 18:30:46 crc kubenswrapper[4926]: I1125 18:30:46.380106 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:46 crc kubenswrapper[4926]: I1125 18:30:46.388712 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e1d8eee0-eb0b-41ad-b486-e7b20ffee29a-etc-swift\") pod \"swift-storage-0\" (UID: \"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a\") " pod="openstack/swift-storage-0"
Nov 25 18:30:46 crc kubenswrapper[4926]: I1125 18:30:46.425294 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.646967 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerStarted","Data":"62fd802b0e4c2c79de2407504f585b3193c10611e6ff8e5a2b2b9c55c388275e"}
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.651063 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"38e40083-2404-4c67-88b5-41ccaf693c6e","Type":"ContainerStarted","Data":"0ffadf5eeb24a99e8f20e8a35f8dd76979e0e26d8eb368c5700951774a590eb2"}
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.651307 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.655333 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"e53ddff3-6cac-43f4-98c6-f909431098f1","Type":"ContainerStarted","Data":"582d7989ba88df4d2c50a15539c9a0f92b433b7c11d86ab241e7058a79f10d56"}
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.655730 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0"
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.690014 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.253355228 podStartE2EDuration="1m31.689981567s" podCreationTimestamp="2025-11-25 18:29:15 +0000 UTC" firstStartedPulling="2025-11-25 18:29:17.352627294 +0000 UTC m=+987.738140889" lastFinishedPulling="2025-11-25 18:30:10.789253613 +0000 UTC m=+1041.174767228" observedRunningTime="2025-11-25 18:30:46.682575635 +0000 UTC m=+1077.068089250" watchObservedRunningTime="2025-11-25 18:30:46.689981567 +0000 UTC m=+1077.075495172"
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:46.710760 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=39.173965013 podStartE2EDuration="1m31.710733246s" podCreationTimestamp="2025-11-25 18:29:15 +0000 UTC" firstStartedPulling="2025-11-25 18:29:18.227293045 +0000 UTC m=+988.612806640" lastFinishedPulling="2025-11-25 18:30:10.764061268 +0000 UTC m=+1041.149574873" observedRunningTime="2025-11-25 18:30:46.708874218 +0000 UTC m=+1077.094387843" watchObservedRunningTime="2025-11-25 18:30:46.710733246 +0000 UTC m=+1077.096246851"
Nov 25 18:30:47 crc kubenswrapper[4926]: I1125 18:30:47.717908 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 25 18:30:47 crc kubenswrapper[4926]: W1125 18:30:47.776147 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode1d8eee0_eb0b_41ad_b486_e7b20ffee29a.slice/crio-824153ef264ce3303ea6831a44bed6a6f08d4a36fa7c40cf2688d33e9f44fb9b WatchSource:0}: Error finding container 824153ef264ce3303ea6831a44bed6a6f08d4a36fa7c40cf2688d33e9f44fb9b: Status 404 returned error can't find the container with id 824153ef264ce3303ea6831a44bed6a6f08d4a36fa7c40cf2688d33e9f44fb9b
Nov 25 18:30:48 crc kubenswrapper[4926]: I1125 18:30:48.681624 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"bb330597bba40205d0504c0671caeb569f3a057f543c0833ea980fd2b013ac0f"}
Nov 25 18:30:48 crc kubenswrapper[4926]: I1125 18:30:48.682089 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"824153ef264ce3303ea6831a44bed6a6f08d4a36fa7c40cf2688d33e9f44fb9b"}
Nov 25 18:30:49 crc kubenswrapper[4926]: I1125 18:30:49.694811 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerStarted","Data":"f360e063c64343b139d39e166b339300b1a923e9747d14997cdf3f93efb2506f"}
Nov 25 18:30:49 crc kubenswrapper[4926]: I1125 18:30:49.704260 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"0480b5497ea4fb4735d2e32f1a787228fe2b4c523afd8ccedd0a9e7e6f810dd2"}
Nov 25 18:30:49 crc kubenswrapper[4926]: I1125 18:30:49.704314 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"7e7ebfcdfefd5ce2082f9831832db4f2ccf256f768c1c519e5ff608fe10ed19c"}
Nov 25 18:30:49 crc kubenswrapper[4926]: I1125 18:30:49.704328 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"d8b301e796afbd021d1651887292e0d899f224607f38d7aad3ddcae1a9e605e1"}
Nov 25 18:30:50 crc kubenswrapper[4926]: I1125 18:30:50.741463 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"5204144d2f6739e383dcff8d1290de99904eb7e2e2f9eebd88bdc46806ef0f8a"}
Nov 25 18:30:50 crc kubenswrapper[4926]: I1125 18:30:50.741973 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"03f6215dc6726d0cbdad575a3a2b68a8ebb56689bcf5c24e0942be778989c77d"}
Nov 25 18:30:50 crc kubenswrapper[4926]: I1125 18:30:50.741984 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"951e26e7c76647484b702256bb1853e32b0da0815b1361c1dc84f22af222cdab"}
Nov 25 18:30:51 crc kubenswrapper[4926]: I1125 18:30:51.758182 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"582c296f0a58b5be60d6c73f2321c4d4f01dd30c78d5454c92b0ff68cac6f31e"}
Nov 25 18:30:51 crc kubenswrapper[4926]: I1125 18:30:51.758720 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"c0b6bcc09242e7365b08df645213abf99480923f95ad4918a72d1443b8f45dcc"}
Nov 25 18:30:51 crc kubenswrapper[4926]: I1125 18:30:51.758734 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"3cb3b6c099972d1239ab279c95e0f269bd6802d3289230b470a78313fae92ed0"}
Nov 25 18:30:52 crc kubenswrapper[4926]: I1125 18:30:52.775143 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"59ba8de0c7bfa4a5e1d7c7c4e567e88014d97b9c8cb5ac2b80d41a977499981c"}
Nov 25 18:30:52 crc kubenswrapper[4926]: I1125 18:30:52.775577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"73b1024b8c1e27286512e176596b5d352d96b9e7659c34e5f801d25df0c70c79"}
Nov 25 18:30:52 crc kubenswrapper[4926]: I1125 18:30:52.775591 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"cfe8be7aed8f4697d1c5760bd6db969e74c53a8bf42a0bcee8ffbbd16c213152"}
Nov 25 18:30:52 crc kubenswrapper[4926]: I1125 18:30:52.775599 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"a88a0e540857ce3ed16506fbbcbcd74360acd7bc12ef8ce9366176b81776904d"}
Nov 25 18:30:52 crc kubenswrapper[4926]: I1125 18:30:52.775611 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e1d8eee0-eb0b-41ad-b486-e7b20ffee29a","Type":"ContainerStarted","Data":"77c04d5dbbae298bad63ca62babe1d14e363fa6bba041f21f7979f35d110ea52"}
Nov 25 18:30:52 crc kubenswrapper[4926]: I1125 18:30:52.815598 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.219609078 podStartE2EDuration="39.815574303s" podCreationTimestamp="2025-11-25 18:30:13 +0000 UTC" firstStartedPulling="2025-11-25 18:30:47.778142077 +0000 UTC m=+1078.163655682" lastFinishedPulling="2025-11-25 18:30:51.374107292 +0000 UTC m=+1081.759620907" observedRunningTime="2025-11-25 18:30:52.811835486 +0000 UTC m=+1083.197349101" watchObservedRunningTime="2025-11-25 18:30:52.815574303 +0000 UTC m=+1083.201087908"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.103909 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f57d4bfc9-868kb"]
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.105774 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.110305 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.123403 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f57d4bfc9-868kb"]
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.208981 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-config\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.209066 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-swift-storage-0\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.209089 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzq88\" (UniqueName: \"kubernetes.io/projected/4ca20e88-39d9-463a-8a59-909b495d8a8b-kube-api-access-wzq88\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.209329 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-sb\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.209629 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-svc\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.209809 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-nb\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.311441 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzq88\" (UniqueName: \"kubernetes.io/projected/4ca20e88-39d9-463a-8a59-909b495d8a8b-kube-api-access-wzq88\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.311590 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-sb\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.312883 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-sb\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.314529 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-svc\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.315258 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-svc\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.315456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-nb\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.316176 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-nb\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.316332 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-config\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.317008 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-config\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.317207 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-swift-storage-0\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.317902 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-swift-storage-0\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.335803 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzq88\" (UniqueName: \"kubernetes.io/projected/4ca20e88-39d9-463a-8a59-909b495d8a8b-kube-api-access-wzq88\") pod \"dnsmasq-dns-6f57d4bfc9-868kb\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.435017 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:53 crc kubenswrapper[4926]: I1125 18:30:53.908622 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f57d4bfc9-868kb"]
Nov 25 18:30:53 crc kubenswrapper[4926]: W1125 18:30:53.918949 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ca20e88_39d9_463a_8a59_909b495d8a8b.slice/crio-5395fb3717ba8c6918cee8fd87db92fa3dc6e173cd7f0db0f486d7e8eb31fef2 WatchSource:0}: Error finding container 5395fb3717ba8c6918cee8fd87db92fa3dc6e173cd7f0db0f486d7e8eb31fef2: Status 404 returned error can't find the container with id 5395fb3717ba8c6918cee8fd87db92fa3dc6e173cd7f0db0f486d7e8eb31fef2
Nov 25 18:30:54 crc kubenswrapper[4926]: I1125 18:30:54.805841 4926 generic.go:334] "Generic (PLEG): container finished" podID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerID="78b61847467f43cbcd7b952943154cba4e03f7604006089c66baa873c4571338" exitCode=0
Nov 25 18:30:54 crc kubenswrapper[4926]: I1125 18:30:54.805955 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" event={"ID":"4ca20e88-39d9-463a-8a59-909b495d8a8b","Type":"ContainerDied","Data":"78b61847467f43cbcd7b952943154cba4e03f7604006089c66baa873c4571338"}
Nov 25 18:30:54 crc kubenswrapper[4926]: I1125 18:30:54.806227 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" event={"ID":"4ca20e88-39d9-463a-8a59-909b495d8a8b","Type":"ContainerStarted","Data":"5395fb3717ba8c6918cee8fd87db92fa3dc6e173cd7f0db0f486d7e8eb31fef2"}
Nov 25 18:30:55 crc kubenswrapper[4926]: I1125 18:30:55.819058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" event={"ID":"4ca20e88-39d9-463a-8a59-909b495d8a8b","Type":"ContainerStarted","Data":"7f73d395b5a9159ab90c796cbd17a5c78b3543a9e23f95a8ef138fd3f5754fc7"}
Nov 25 18:30:55 crc kubenswrapper[4926]: I1125 18:30:55.819637 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb"
Nov 25 18:30:55 crc kubenswrapper[4926]: I1125 18:30:55.843417 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" podStartSLOduration=2.8433852120000003 podStartE2EDuration="2.843385212s" podCreationTimestamp="2025-11-25 18:30:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:30:55.839243455 +0000 UTC m=+1086.224757070" watchObservedRunningTime="2025-11-25 18:30:55.843385212 +0000 UTC m=+1086.228898817"
Nov 25 18:30:56 crc kubenswrapper[4926]: I1125 18:30:56.827987 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.107:5671: connect: connection refused"
Nov 25 18:30:56 crc kubenswrapper[4926]: I1125
18:30:56.830652 4926 generic.go:334] "Generic (PLEG): container finished" podID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerID="f360e063c64343b139d39e166b339300b1a923e9747d14997cdf3f93efb2506f" exitCode=0 Nov 25 18:30:56 crc kubenswrapper[4926]: I1125 18:30:56.830691 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerDied","Data":"f360e063c64343b139d39e166b339300b1a923e9747d14997cdf3f93efb2506f"} Nov 25 18:30:57 crc kubenswrapper[4926]: I1125 18:30:57.513165 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="e53ddff3-6cac-43f4-98c6-f909431098f1" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.108:5671: connect: connection refused" Nov 25 18:30:57 crc kubenswrapper[4926]: I1125 18:30:57.591175 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.109:5671: connect: connection refused" Nov 25 18:30:57 crc kubenswrapper[4926]: I1125 18:30:57.844719 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerStarted","Data":"e90e2d4d25524df90364d719472780a27ef1a9cb1bc582b85a62fbb7f338d386"} Nov 25 18:31:00 crc kubenswrapper[4926]: I1125 18:31:00.877209 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerStarted","Data":"57f73daf6657f6aabff98fc989249aba5a37203487312f893612b8697a828bc6"} Nov 25 18:31:00 crc kubenswrapper[4926]: I1125 18:31:00.877627 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerStarted","Data":"c36a7da63701ddf686eb09fed950b8fbe57acfdfd9501f9309e0108b7c663f69"} Nov 25 18:31:00 crc kubenswrapper[4926]: I1125 18:31:00.918843 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.918820896 podStartE2EDuration="16.918820896s" podCreationTimestamp="2025-11-25 18:30:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:00.914833373 +0000 UTC m=+1091.300346978" watchObservedRunningTime="2025-11-25 18:31:00.918820896 +0000 UTC m=+1091.304334521" Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.437736 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.525312 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84449c6c77-rhr8j"] Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.525682 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerName="dnsmasq-dns" containerID="cri-o://5f135fff9b75d69b77abffdf1c4a8f93e747fc59df12bd3ab5d7a9a82ba4c26c" gracePeriod=10 Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.541096 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.541513 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.923715 4926 generic.go:334] "Generic (PLEG): container finished" podID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerID="5f135fff9b75d69b77abffdf1c4a8f93e747fc59df12bd3ab5d7a9a82ba4c26c" exitCode=0 Nov 25 18:31:03 crc kubenswrapper[4926]: I1125 18:31:03.924255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" event={"ID":"7196b83b-c064-4190-94f0-e314cd1a6f21","Type":"ContainerDied","Data":"5f135fff9b75d69b77abffdf1c4a8f93e747fc59df12bd3ab5d7a9a82ba4c26c"} Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.033345 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.130626 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjh4d\" (UniqueName: \"kubernetes.io/projected/7196b83b-c064-4190-94f0-e314cd1a6f21-kube-api-access-hjh4d\") pod \"7196b83b-c064-4190-94f0-e314cd1a6f21\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.130783 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-dns-svc\") pod \"7196b83b-c064-4190-94f0-e314cd1a6f21\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.130868 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-nb\") pod \"7196b83b-c064-4190-94f0-e314cd1a6f21\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.130907 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-config\") pod \"7196b83b-c064-4190-94f0-e314cd1a6f21\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.131025 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-sb\") pod \"7196b83b-c064-4190-94f0-e314cd1a6f21\" (UID: \"7196b83b-c064-4190-94f0-e314cd1a6f21\") " Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.137605 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7196b83b-c064-4190-94f0-e314cd1a6f21-kube-api-access-hjh4d" (OuterVolumeSpecName: "kube-api-access-hjh4d") pod "7196b83b-c064-4190-94f0-e314cd1a6f21" (UID: "7196b83b-c064-4190-94f0-e314cd1a6f21"). InnerVolumeSpecName "kube-api-access-hjh4d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.177952 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-config" (OuterVolumeSpecName: "config") pod "7196b83b-c064-4190-94f0-e314cd1a6f21" (UID: "7196b83b-c064-4190-94f0-e314cd1a6f21"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.179158 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7196b83b-c064-4190-94f0-e314cd1a6f21" (UID: "7196b83b-c064-4190-94f0-e314cd1a6f21"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.185832 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7196b83b-c064-4190-94f0-e314cd1a6f21" (UID: "7196b83b-c064-4190-94f0-e314cd1a6f21"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.201824 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7196b83b-c064-4190-94f0-e314cd1a6f21" (UID: "7196b83b-c064-4190-94f0-e314cd1a6f21"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.233044 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.233084 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjh4d\" (UniqueName: \"kubernetes.io/projected/7196b83b-c064-4190-94f0-e314cd1a6f21-kube-api-access-hjh4d\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.233097 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.233107 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.233118 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7196b83b-c064-4190-94f0-e314cd1a6f21-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.936093 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" event={"ID":"7196b83b-c064-4190-94f0-e314cd1a6f21","Type":"ContainerDied","Data":"dbe38460d405937c937b1e7ece3f6c890a8490765848eaf95e011bac5b1b523c"} Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.936169 4926 scope.go:117] "RemoveContainer" 
containerID="5f135fff9b75d69b77abffdf1c4a8f93e747fc59df12bd3ab5d7a9a82ba4c26c" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.936174 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84449c6c77-rhr8j" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.963495 4926 scope.go:117] "RemoveContainer" containerID="15a211cd75c99d1669ebc8271259274b7f0f4bbf8ba50c55317786f90abbf1ff" Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.968930 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84449c6c77-rhr8j"] Nov 25 18:31:04 crc kubenswrapper[4926]: I1125 18:31:04.979186 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84449c6c77-rhr8j"] Nov 25 18:31:05 crc kubenswrapper[4926]: I1125 18:31:05.192615 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 18:31:06 crc kubenswrapper[4926]: I1125 18:31:06.348508 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" path="/var/lib/kubelet/pods/7196b83b-c064-4190-94f0-e314cd1a6f21/volumes" Nov 25 18:31:06 crc kubenswrapper[4926]: I1125 18:31:06.826676 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.250036 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-64pj4"] Nov 25 18:31:07 crc kubenswrapper[4926]: E1125 18:31:07.250766 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerName="dnsmasq-dns" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.250786 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerName="dnsmasq-dns" Nov 25 18:31:07 crc kubenswrapper[4926]: E1125 18:31:07.250813 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerName="init" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.250822 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerName="init" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.250967 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7196b83b-c064-4190-94f0-e314cd1a6f21" containerName="dnsmasq-dns" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.251636 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.261628 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-64pj4"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.342281 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-jjkj2"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.344180 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.349780 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jjkj2"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.362973 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-b0e5-account-create-update-fbpdd"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.364497 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.377676 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.404637 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b0e5-account-create-update-fbpdd"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.411533 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-operator-scripts\") pod \"barbican-db-create-64pj4\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") " pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.411621 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vb65\" (UniqueName: \"kubernetes.io/projected/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-kube-api-access-4vb65\") pod \"barbican-db-create-64pj4\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") " pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.454581 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-fa53-account-create-update-pv9jl"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.455875 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.458505 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.468952 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fa53-account-create-update-pv9jl"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.512641 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.513426 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9c46\" (UniqueName: \"kubernetes.io/projected/0d88d8c5-1bd2-4c14-9fe0-516455a79891-kube-api-access-x9c46\") pod \"cinder-db-create-jjkj2\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") " pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.513498 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d129a26-451d-4572-8161-18fc1c5be7dd-operator-scripts\") pod \"barbican-b0e5-account-create-update-fbpdd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") " pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.513923 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxqkq\" (UniqueName: \"kubernetes.io/projected/8d129a26-451d-4572-8161-18fc1c5be7dd-kube-api-access-lxqkq\") pod \"barbican-b0e5-account-create-update-fbpdd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") " pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.513973 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d88d8c5-1bd2-4c14-9fe0-516455a79891-operator-scripts\") pod \"cinder-db-create-jjkj2\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") " pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.514012 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-operator-scripts\") pod \"barbican-db-create-64pj4\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") " pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.514039 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vb65\" (UniqueName: \"kubernetes.io/projected/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-kube-api-access-4vb65\") pod \"barbican-db-create-64pj4\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") " pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.515032 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-operator-scripts\") pod \"barbican-db-create-64pj4\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") " pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.539089 4926 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/keystone-db-sync-gczcd"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.540556 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.544199 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.544439 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.544599 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.544741 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2fxt6" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.546629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vb65\" (UniqueName: \"kubernetes.io/projected/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-kube-api-access-4vb65\") pod \"barbican-db-create-64pj4\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") " pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.554904 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gczcd"] Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.570020 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-64pj4" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.594587 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.616587 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9c46\" (UniqueName: \"kubernetes.io/projected/0d88d8c5-1bd2-4c14-9fe0-516455a79891-kube-api-access-x9c46\") pod \"cinder-db-create-jjkj2\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") " pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.616959 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d129a26-451d-4572-8161-18fc1c5be7dd-operator-scripts\") pod \"barbican-b0e5-account-create-update-fbpdd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") " pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.617012 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxqkq\" (UniqueName: \"kubernetes.io/projected/8d129a26-451d-4572-8161-18fc1c5be7dd-kube-api-access-lxqkq\") pod \"barbican-b0e5-account-create-update-fbpdd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") " pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.617038 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d88d8c5-1bd2-4c14-9fe0-516455a79891-operator-scripts\") pod \"cinder-db-create-jjkj2\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") " pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.617061 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dn4x\" (UniqueName: \"kubernetes.io/projected/985212cb-7a46-4ade-b877-cb67fd5ebf66-kube-api-access-8dn4x\") pod \"cinder-fa53-account-create-update-pv9jl\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") " pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.617095 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/985212cb-7a46-4ade-b877-cb67fd5ebf66-operator-scripts\") pod \"cinder-fa53-account-create-update-pv9jl\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") " pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.618452 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d88d8c5-1bd2-4c14-9fe0-516455a79891-operator-scripts\") pod \"cinder-db-create-jjkj2\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") " pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.619677 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d129a26-451d-4572-8161-18fc1c5be7dd-operator-scripts\") pod \"barbican-b0e5-account-create-update-fbpdd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") " pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.648416 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9c46\" (UniqueName: \"kubernetes.io/projected/0d88d8c5-1bd2-4c14-9fe0-516455a79891-kube-api-access-x9c46\") pod \"cinder-db-create-jjkj2\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") " pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.661955 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jjkj2" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.667780 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxqkq\" (UniqueName: \"kubernetes.io/projected/8d129a26-451d-4572-8161-18fc1c5be7dd-kube-api-access-lxqkq\") pod \"barbican-b0e5-account-create-update-fbpdd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") " pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.689945 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b0e5-account-create-update-fbpdd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.723765 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-config-data\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.723894 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-combined-ca-bundle\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.724108 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24p8d\" (UniqueName: \"kubernetes.io/projected/90a8c684-648e-4486-8da0-ff997b994626-kube-api-access-24p8d\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.724169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dn4x\" (UniqueName: \"kubernetes.io/projected/985212cb-7a46-4ade-b877-cb67fd5ebf66-kube-api-access-8dn4x\") pod \"cinder-fa53-account-create-update-pv9jl\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") " pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.724217 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/985212cb-7a46-4ade-b877-cb67fd5ebf66-operator-scripts\") pod \"cinder-fa53-account-create-update-pv9jl\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") " pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.725365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/985212cb-7a46-4ade-b877-cb67fd5ebf66-operator-scripts\") pod \"cinder-fa53-account-create-update-pv9jl\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") " pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.749501 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dn4x\" (UniqueName: \"kubernetes.io/projected/985212cb-7a46-4ade-b877-cb67fd5ebf66-kube-api-access-8dn4x\") pod \"cinder-fa53-account-create-update-pv9jl\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") " pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.771836 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-fa53-account-create-update-pv9jl" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.826755 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-config-data\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.826843 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-combined-ca-bundle\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.826945 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24p8d\" (UniqueName: \"kubernetes.io/projected/90a8c684-648e-4486-8da0-ff997b994626-kube-api-access-24p8d\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.835406 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-config-data\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.839411 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-combined-ca-bundle\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:07 crc kubenswrapper[4926]: I1125 18:31:07.849359 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24p8d\" (UniqueName: \"kubernetes.io/projected/90a8c684-648e-4486-8da0-ff997b994626-kube-api-access-24p8d\") pod \"keystone-db-sync-gczcd\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") " pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.055439 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-gczcd" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.178762 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-64pj4"] Nov 25 18:31:08 crc kubenswrapper[4926]: W1125 18:31:08.190000 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca121447_f823_4aa7_b9ad_d3d8bd7d92bc.slice/crio-062da893ba95bb07573aa0b27447dbc7fbdd215e1156cdd934546c82f030ac9b WatchSource:0}: Error finding container 062da893ba95bb07573aa0b27447dbc7fbdd215e1156cdd934546c82f030ac9b: Status 404 returned error can't find the container with id 062da893ba95bb07573aa0b27447dbc7fbdd215e1156cdd934546c82f030ac9b Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.313388 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b0e5-account-create-update-fbpdd"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.321140 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jjkj2"] Nov 25 18:31:08 crc kubenswrapper[4926]: W1125 18:31:08.327628 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d129a26_451d_4572_8161_18fc1c5be7dd.slice/crio-f52287feda6a1c267d85884153c2d7f0fd66261e3081b65678c6b505ad774e4d WatchSource:0}: Error finding container f52287feda6a1c267d85884153c2d7f0fd66261e3081b65678c6b505ad774e4d: Status 404 returned error can't find the container with id f52287feda6a1c267d85884153c2d7f0fd66261e3081b65678c6b505ad774e4d Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.347345 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fa53-account-create-update-pv9jl"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.588893 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gczcd"] Nov 25 18:31:08 crc kubenswrapper[4926]: W1125 18:31:08.627632 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90a8c684_648e_4486_8da0_ff997b994626.slice/crio-1d66dc056e7c228add5f3d35a4323c1e6413d778b7f7c63832c0ce983e5e7b3f WatchSource:0}: Error finding container 1d66dc056e7c228add5f3d35a4323c1e6413d778b7f7c63832c0ce983e5e7b3f: Status 404 returned error can't find the container with id 1d66dc056e7c228add5f3d35a4323c1e6413d778b7f7c63832c0ce983e5e7b3f Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.805651 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-4wq2h"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.807080 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.822802 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-4wq2h"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.861205 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-zdqbf"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.862727 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.869881 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-fn7fb" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.870139 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.871506 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-zdqbf"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.941492 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-586d-account-create-update-wqnvl"] Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.942867 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-586d-account-create-update-wqnvl" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.946486 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.949063 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9j55\" (UniqueName: \"kubernetes.io/projected/3c8d247f-750f-432d-8c0e-8e5c87cca18e-kube-api-access-b9j55\") pod \"glance-db-create-4wq2h\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") " pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.949109 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-db-sync-config-data\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.949197 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c8d247f-750f-432d-8c0e-8e5c87cca18e-operator-scripts\") pod \"glance-db-create-4wq2h\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") " pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.949225 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-combined-ca-bundle\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.949245 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-config-data\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.949292 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xfdl\" (UniqueName: \"kubernetes.io/projected/f9865625-980a-4b3e-bb1e-53d5223db907-kube-api-access-8xfdl\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:08 crc kubenswrapper[4926]: I1125 18:31:08.988443 
4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-586d-account-create-update-wqnvl"] Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.030858 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa53-account-create-update-pv9jl" event={"ID":"985212cb-7a46-4ade-b877-cb67fd5ebf66","Type":"ContainerStarted","Data":"8ed37712c887ccf88b76b6bf6a6a603208689a11b072f05093e76aad50e444b1"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.030918 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa53-account-create-update-pv9jl" event={"ID":"985212cb-7a46-4ade-b877-cb67fd5ebf66","Type":"ContainerStarted","Data":"50dc7111dce5eed92d8ed57f76628e5f9b7fa7315db5d46bfd701d6820474d2b"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.051459 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gczcd" event={"ID":"90a8c684-648e-4486-8da0-ff997b994626","Type":"ContainerStarted","Data":"1d66dc056e7c228add5f3d35a4323c1e6413d778b7f7c63832c0ce983e5e7b3f"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052101 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xfdl\" (UniqueName: \"kubernetes.io/projected/f9865625-980a-4b3e-bb1e-53d5223db907-kube-api-access-8xfdl\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052178 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xxtl\" (UniqueName: \"kubernetes.io/projected/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-kube-api-access-4xxtl\") pod \"glance-586d-account-create-update-wqnvl\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") " pod="openstack/glance-586d-account-create-update-wqnvl" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052218 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9j55\" (UniqueName: \"kubernetes.io/projected/3c8d247f-750f-432d-8c0e-8e5c87cca18e-kube-api-access-b9j55\") pod \"glance-db-create-4wq2h\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") " pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-db-sync-config-data\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052262 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-operator-scripts\") pod \"glance-586d-account-create-update-wqnvl\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") " pod="openstack/glance-586d-account-create-update-wqnvl" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052312 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c8d247f-750f-432d-8c0e-8e5c87cca18e-operator-scripts\") pod \"glance-db-create-4wq2h\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") " pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 
18:31:09.052342 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-combined-ca-bundle\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.052364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-config-data\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.064157 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c8d247f-750f-432d-8c0e-8e5c87cca18e-operator-scripts\") pod \"glance-db-create-4wq2h\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") " pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.081167 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-config-data\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.084957 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-db-sync-config-data\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.085321 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0e5-account-create-update-fbpdd" event={"ID":"8d129a26-451d-4572-8161-18fc1c5be7dd","Type":"ContainerStarted","Data":"0a836c7110c8b3d1dbc0e243ec3e857f0f246dd2465d16af1402ca1fc8c5e6a4"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.085365 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0e5-account-create-update-fbpdd" event={"ID":"8d129a26-451d-4572-8161-18fc1c5be7dd","Type":"ContainerStarted","Data":"f52287feda6a1c267d85884153c2d7f0fd66261e3081b65678c6b505ad774e4d"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.089257 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-combined-ca-bundle\") pod \"watcher-db-sync-zdqbf\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.090218 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9j55\" (UniqueName: \"kubernetes.io/projected/3c8d247f-750f-432d-8c0e-8e5c87cca18e-kube-api-access-b9j55\") pod \"glance-db-create-4wq2h\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") " pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.091049 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xfdl\" (UniqueName: \"kubernetes.io/projected/f9865625-980a-4b3e-bb1e-53d5223db907-kube-api-access-8xfdl\") pod \"watcher-db-sync-zdqbf\" (UID: 
\"f9865625-980a-4b3e-bb1e-53d5223db907\") " pod="openstack/watcher-db-sync-zdqbf" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.108574 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-64pj4" event={"ID":"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc","Type":"ContainerStarted","Data":"7bd2107ab042b4350792da30839205b003c53faf5f72f8207d11a4ff6f3eb4ec"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.108652 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-64pj4" event={"ID":"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc","Type":"ContainerStarted","Data":"062da893ba95bb07573aa0b27447dbc7fbdd215e1156cdd934546c82f030ac9b"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.125426 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-fa53-account-create-update-pv9jl" podStartSLOduration=2.125396554 podStartE2EDuration="2.125396554s" podCreationTimestamp="2025-11-25 18:31:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:09.075617352 +0000 UTC m=+1099.461130957" watchObservedRunningTime="2025-11-25 18:31:09.125396554 +0000 UTC m=+1099.510910159" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.139551 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jjkj2" event={"ID":"0d88d8c5-1bd2-4c14-9fe0-516455a79891","Type":"ContainerStarted","Data":"49a769390433ea53221af2a300b25a993c04aaf325463920a04467a8c7331611"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.139609 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jjkj2" event={"ID":"0d88d8c5-1bd2-4c14-9fe0-516455a79891","Type":"ContainerStarted","Data":"5620810d4fe3fb61c29b53de07f8295e57b050a394576173bc1ffc553bf86245"} Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.156423 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-4wq2h" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.157664 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xxtl\" (UniqueName: \"kubernetes.io/projected/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-kube-api-access-4xxtl\") pod \"glance-586d-account-create-update-wqnvl\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") " pod="openstack/glance-586d-account-create-update-wqnvl" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.157751 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-operator-scripts\") pod \"glance-586d-account-create-update-wqnvl\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") " pod="openstack/glance-586d-account-create-update-wqnvl" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.159825 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-operator-scripts\") pod \"glance-586d-account-create-update-wqnvl\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") " pod="openstack/glance-586d-account-create-update-wqnvl" Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.193163 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-zdqbf"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.221088 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xxtl\" (UniqueName: \"kubernetes.io/projected/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-kube-api-access-4xxtl\") pod \"glance-586d-account-create-update-wqnvl\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") " pod="openstack/glance-586d-account-create-update-wqnvl"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.267757 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-jncjg"]
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.269057 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.281815 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-586d-account-create-update-wqnvl"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.287759 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-jncjg"]
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.327782 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-b0e5-account-create-update-fbpdd" podStartSLOduration=2.327758114 podStartE2EDuration="2.327758114s" podCreationTimestamp="2025-11-25 18:31:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:09.111552405 +0000 UTC m=+1099.497066010" watchObservedRunningTime="2025-11-25 18:31:09.327758114 +0000 UTC m=+1099.713271719"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.366445 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-9639-account-create-update-l4sqt"]
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.367934 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.370611 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.371176 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9639-account-create-update-l4sqt"]
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.371764 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-64pj4" podStartSLOduration=2.3717434170000002 podStartE2EDuration="2.371743417s" podCreationTimestamp="2025-11-25 18:31:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:09.172854558 +0000 UTC m=+1099.558368163" watchObservedRunningTime="2025-11-25 18:31:09.371743417 +0000 UTC m=+1099.757257022"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.372628 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd5k8\" (UniqueName: \"kubernetes.io/projected/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-kube-api-access-qd5k8\") pod \"neutron-db-create-jncjg\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") " pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.372677 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-operator-scripts\") pod \"neutron-db-create-jncjg\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") " pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.379978 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-jjkj2" podStartSLOduration=2.37996079 podStartE2EDuration="2.37996079s" podCreationTimestamp="2025-11-25 18:31:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:09.20869276 +0000 UTC m=+1099.594206365" watchObservedRunningTime="2025-11-25 18:31:09.37996079 +0000 UTC m=+1099.765474395"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.478504 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb1ce46-a836-47f2-b91a-99161a7e66cd-operator-scripts\") pod \"neutron-9639-account-create-update-l4sqt\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") " pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.479077 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh42k\" (UniqueName: \"kubernetes.io/projected/6eb1ce46-a836-47f2-b91a-99161a7e66cd-kube-api-access-dh42k\") pod \"neutron-9639-account-create-update-l4sqt\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") " pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.479165 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd5k8\" (UniqueName: \"kubernetes.io/projected/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-kube-api-access-qd5k8\") pod \"neutron-db-create-jncjg\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") " pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.479257 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-operator-scripts\") pod \"neutron-db-create-jncjg\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") " pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.480220 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-operator-scripts\") pod \"neutron-db-create-jncjg\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") " pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.519767 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd5k8\" (UniqueName: \"kubernetes.io/projected/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-kube-api-access-qd5k8\") pod \"neutron-db-create-jncjg\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") " pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.584541 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb1ce46-a836-47f2-b91a-99161a7e66cd-operator-scripts\") pod \"neutron-9639-account-create-update-l4sqt\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") " pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.585065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh42k\" (UniqueName: \"kubernetes.io/projected/6eb1ce46-a836-47f2-b91a-99161a7e66cd-kube-api-access-dh42k\") pod \"neutron-9639-account-create-update-l4sqt\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") " pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.593442 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb1ce46-a836-47f2-b91a-99161a7e66cd-operator-scripts\") pod \"neutron-9639-account-create-update-l4sqt\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") " pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.608345 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.608905 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh42k\" (UniqueName: \"kubernetes.io/projected/6eb1ce46-a836-47f2-b91a-99161a7e66cd-kube-api-access-dh42k\") pod \"neutron-9639-account-create-update-l4sqt\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") " pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.694313 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.914896 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-586d-account-create-update-wqnvl"]
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.935450 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-zdqbf"]
Nov 25 18:31:09 crc kubenswrapper[4926]: I1125 18:31:09.942315 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-4wq2h"]
Nov 25 18:31:09 crc kubenswrapper[4926]: W1125 18:31:09.952895 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c8d247f_750f_432d_8c0e_8e5c87cca18e.slice/crio-726945a7533403c167ae21fca1a09bc36f082a464eec45b7c3034ed000b1e16f WatchSource:0}: Error finding container 726945a7533403c167ae21fca1a09bc36f082a464eec45b7c3034ed000b1e16f: Status 404 returned error can't find the container with id 726945a7533403c167ae21fca1a09bc36f082a464eec45b7c3034ed000b1e16f
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.162420 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-jncjg"]
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.162917 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-586d-account-create-update-wqnvl" event={"ID":"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2","Type":"ContainerStarted","Data":"442df9e1b8d0d782a4bb16986ea16bb353f0fdb7a1fad932a233effc96931c3b"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.162979 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-586d-account-create-update-wqnvl" event={"ID":"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2","Type":"ContainerStarted","Data":"9cf706c13c0d399e94ac126ae4ce1f732439f67665e6be630f7df17c7e4090e6"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.167647 4926 generic.go:334] "Generic (PLEG): container finished" podID="8d129a26-451d-4572-8161-18fc1c5be7dd" containerID="0a836c7110c8b3d1dbc0e243ec3e857f0f246dd2465d16af1402ca1fc8c5e6a4" exitCode=0
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.167717 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0e5-account-create-update-fbpdd" event={"ID":"8d129a26-451d-4572-8161-18fc1c5be7dd","Type":"ContainerDied","Data":"0a836c7110c8b3d1dbc0e243ec3e857f0f246dd2465d16af1402ca1fc8c5e6a4"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.171523 4926 generic.go:334] "Generic (PLEG): container finished" podID="ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" containerID="7bd2107ab042b4350792da30839205b003c53faf5f72f8207d11a4ff6f3eb4ec" exitCode=0
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.171577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-64pj4" event={"ID":"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc","Type":"ContainerDied","Data":"7bd2107ab042b4350792da30839205b003c53faf5f72f8207d11a4ff6f3eb4ec"}
Nov 25 18:31:10 crc kubenswrapper[4926]: W1125 18:31:10.182674 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba8ba4ab_f0d8_4f8e_9a8e_d3bcb07b54f6.slice/crio-c928f41379175667f775a9bdaef3ae1cd171b337fb9743ac9dc01c1083a1e438 WatchSource:0}: Error finding container c928f41379175667f775a9bdaef3ae1cd171b337fb9743ac9dc01c1083a1e438: Status 404 returned error can't find the container with id c928f41379175667f775a9bdaef3ae1cd171b337fb9743ac9dc01c1083a1e438
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.182953 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-4wq2h" event={"ID":"3c8d247f-750f-432d-8c0e-8e5c87cca18e","Type":"ContainerStarted","Data":"1bedda484988ded2f362cf0e33516ba0ebc1e08f57acb7dbfaa81fa3e3ce8e94"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.182998 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-4wq2h" event={"ID":"3c8d247f-750f-432d-8c0e-8e5c87cca18e","Type":"ContainerStarted","Data":"726945a7533403c167ae21fca1a09bc36f082a464eec45b7c3034ed000b1e16f"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.183979 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-586d-account-create-update-wqnvl" podStartSLOduration=2.183959865 podStartE2EDuration="2.183959865s" podCreationTimestamp="2025-11-25 18:31:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:10.18143704 +0000 UTC m=+1100.566950645" watchObservedRunningTime="2025-11-25 18:31:10.183959865 +0000 UTC m=+1100.569473470"
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.189877 4926 generic.go:334] "Generic (PLEG): container finished" podID="0d88d8c5-1bd2-4c14-9fe0-516455a79891" containerID="49a769390433ea53221af2a300b25a993c04aaf325463920a04467a8c7331611" exitCode=0
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.189965 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jjkj2" event={"ID":"0d88d8c5-1bd2-4c14-9fe0-516455a79891","Type":"ContainerDied","Data":"49a769390433ea53221af2a300b25a993c04aaf325463920a04467a8c7331611"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.206334 4926 generic.go:334] "Generic (PLEG): container finished" podID="985212cb-7a46-4ade-b877-cb67fd5ebf66" containerID="8ed37712c887ccf88b76b6bf6a6a603208689a11b072f05093e76aad50e444b1" exitCode=0
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.206415 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa53-account-create-update-pv9jl" event={"ID":"985212cb-7a46-4ade-b877-cb67fd5ebf66","Type":"ContainerDied","Data":"8ed37712c887ccf88b76b6bf6a6a603208689a11b072f05093e76aad50e444b1"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.214699 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-zdqbf" event={"ID":"f9865625-980a-4b3e-bb1e-53d5223db907","Type":"ContainerStarted","Data":"961e92e91defdfd23ebc464e4e9ba8936cc0a802ed865c5ed1ce86b62cca8bbd"}
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.312602 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-4wq2h" podStartSLOduration=2.312576538 podStartE2EDuration="2.312576538s" podCreationTimestamp="2025-11-25 18:31:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:10.277206809 +0000 UTC m=+1100.662720414" watchObservedRunningTime="2025-11-25 18:31:10.312576538 +0000 UTC m=+1100.698090143"
Nov 25 18:31:10 crc kubenswrapper[4926]: W1125 18:31:10.349730 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6eb1ce46_a836_47f2_b91a_99161a7e66cd.slice/crio-b86d1db5e62dbb9a6010da1aa6f35be4efb543ef9edf9e54c1057401c23187f9 WatchSource:0}: Error finding container b86d1db5e62dbb9a6010da1aa6f35be4efb543ef9edf9e54c1057401c23187f9: Status 404 returned error can't find the container with id b86d1db5e62dbb9a6010da1aa6f35be4efb543ef9edf9e54c1057401c23187f9
Nov 25 18:31:10 crc kubenswrapper[4926]: I1125 18:31:10.357032 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9639-account-create-update-l4sqt"]
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.233042 4926 generic.go:334] "Generic (PLEG): container finished" podID="5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" containerID="442df9e1b8d0d782a4bb16986ea16bb353f0fdb7a1fad932a233effc96931c3b" exitCode=0
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.233098 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-586d-account-create-update-wqnvl" event={"ID":"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2","Type":"ContainerDied","Data":"442df9e1b8d0d782a4bb16986ea16bb353f0fdb7a1fad932a233effc96931c3b"}
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.236998 4926 generic.go:334] "Generic (PLEG): container finished" podID="6eb1ce46-a836-47f2-b91a-99161a7e66cd" containerID="2efc7517b01f1cd82ba40717fbe0c947cce725478ef1f7d7d54be3f470c1e1bf" exitCode=0
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.237082 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9639-account-create-update-l4sqt" event={"ID":"6eb1ce46-a836-47f2-b91a-99161a7e66cd","Type":"ContainerDied","Data":"2efc7517b01f1cd82ba40717fbe0c947cce725478ef1f7d7d54be3f470c1e1bf"}
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.237114 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9639-account-create-update-l4sqt" event={"ID":"6eb1ce46-a836-47f2-b91a-99161a7e66cd","Type":"ContainerStarted","Data":"b86d1db5e62dbb9a6010da1aa6f35be4efb543ef9edf9e54c1057401c23187f9"}
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.239037 4926 generic.go:334] "Generic (PLEG): container finished" podID="3c8d247f-750f-432d-8c0e-8e5c87cca18e" containerID="1bedda484988ded2f362cf0e33516ba0ebc1e08f57acb7dbfaa81fa3e3ce8e94" exitCode=0
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.239145 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-4wq2h" event={"ID":"3c8d247f-750f-432d-8c0e-8e5c87cca18e","Type":"ContainerDied","Data":"1bedda484988ded2f362cf0e33516ba0ebc1e08f57acb7dbfaa81fa3e3ce8e94"}
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.240845 4926 generic.go:334] "Generic (PLEG): container finished" podID="ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" containerID="421c2b3b9b85603d26a2c16c32a7413ac0e61b5a31fa238f4bcdfead1c0d26de" exitCode=0
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.241229 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-jncjg" event={"ID":"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6","Type":"ContainerDied","Data":"421c2b3b9b85603d26a2c16c32a7413ac0e61b5a31fa238f4bcdfead1c0d26de"}
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.241299 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-jncjg" event={"ID":"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6","Type":"ContainerStarted","Data":"c928f41379175667f775a9bdaef3ae1cd171b337fb9743ac9dc01c1083a1e438"}
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.739662 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-64pj4"
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.747068 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b0e5-account-create-update-fbpdd"
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.768672 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jjkj2"
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.794213 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fa53-account-create-update-pv9jl"
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837179 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-operator-scripts\") pod \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837241 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/985212cb-7a46-4ade-b877-cb67fd5ebf66-operator-scripts\") pod \"985212cb-7a46-4ade-b877-cb67fd5ebf66\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837268 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxqkq\" (UniqueName: \"kubernetes.io/projected/8d129a26-451d-4572-8161-18fc1c5be7dd-kube-api-access-lxqkq\") pod \"8d129a26-451d-4572-8161-18fc1c5be7dd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837452 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9c46\" (UniqueName: \"kubernetes.io/projected/0d88d8c5-1bd2-4c14-9fe0-516455a79891-kube-api-access-x9c46\") pod \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837489 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dn4x\" (UniqueName: \"kubernetes.io/projected/985212cb-7a46-4ade-b877-cb67fd5ebf66-kube-api-access-8dn4x\") pod \"985212cb-7a46-4ade-b877-cb67fd5ebf66\" (UID: \"985212cb-7a46-4ade-b877-cb67fd5ebf66\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837507 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d129a26-451d-4572-8161-18fc1c5be7dd-operator-scripts\") pod \"8d129a26-451d-4572-8161-18fc1c5be7dd\" (UID: \"8d129a26-451d-4572-8161-18fc1c5be7dd\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837560 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d88d8c5-1bd2-4c14-9fe0-516455a79891-operator-scripts\") pod \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\" (UID: \"0d88d8c5-1bd2-4c14-9fe0-516455a79891\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.837643 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vb65\" (UniqueName: \"kubernetes.io/projected/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-kube-api-access-4vb65\") pod \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\" (UID: \"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc\") "
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.840015 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d129a26-451d-4572-8161-18fc1c5be7dd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8d129a26-451d-4572-8161-18fc1c5be7dd" (UID: "8d129a26-451d-4572-8161-18fc1c5be7dd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.840196 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/985212cb-7a46-4ade-b877-cb67fd5ebf66-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "985212cb-7a46-4ade-b877-cb67fd5ebf66" (UID: "985212cb-7a46-4ade-b877-cb67fd5ebf66"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.840206 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d88d8c5-1bd2-4c14-9fe0-516455a79891-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0d88d8c5-1bd2-4c14-9fe0-516455a79891" (UID: "0d88d8c5-1bd2-4c14-9fe0-516455a79891"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.840889 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" (UID: "ca121447-f823-4aa7-b9ad-d3d8bd7d92bc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.845710 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d88d8c5-1bd2-4c14-9fe0-516455a79891-kube-api-access-x9c46" (OuterVolumeSpecName: "kube-api-access-x9c46") pod "0d88d8c5-1bd2-4c14-9fe0-516455a79891" (UID: "0d88d8c5-1bd2-4c14-9fe0-516455a79891"). InnerVolumeSpecName "kube-api-access-x9c46". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.845774 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-kube-api-access-4vb65" (OuterVolumeSpecName: "kube-api-access-4vb65") pod "ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" (UID: "ca121447-f823-4aa7-b9ad-d3d8bd7d92bc"). InnerVolumeSpecName "kube-api-access-4vb65". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.845970 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d129a26-451d-4572-8161-18fc1c5be7dd-kube-api-access-lxqkq" (OuterVolumeSpecName: "kube-api-access-lxqkq") pod "8d129a26-451d-4572-8161-18fc1c5be7dd" (UID: "8d129a26-451d-4572-8161-18fc1c5be7dd"). InnerVolumeSpecName "kube-api-access-lxqkq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.846442 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/985212cb-7a46-4ade-b877-cb67fd5ebf66-kube-api-access-8dn4x" (OuterVolumeSpecName: "kube-api-access-8dn4x") pod "985212cb-7a46-4ade-b877-cb67fd5ebf66" (UID: "985212cb-7a46-4ade-b877-cb67fd5ebf66"). InnerVolumeSpecName "kube-api-access-8dn4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939755 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9c46\" (UniqueName: \"kubernetes.io/projected/0d88d8c5-1bd2-4c14-9fe0-516455a79891-kube-api-access-x9c46\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939794 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dn4x\" (UniqueName: \"kubernetes.io/projected/985212cb-7a46-4ade-b877-cb67fd5ebf66-kube-api-access-8dn4x\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939803 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d129a26-451d-4572-8161-18fc1c5be7dd-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939812 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d88d8c5-1bd2-4c14-9fe0-516455a79891-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939823 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vb65\" (UniqueName: \"kubernetes.io/projected/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-kube-api-access-4vb65\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939831 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939841 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/985212cb-7a46-4ade-b877-cb67fd5ebf66-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:11 crc kubenswrapper[4926]: I1125 18:31:11.939849 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxqkq\" (UniqueName: \"kubernetes.io/projected/8d129a26-451d-4572-8161-18fc1c5be7dd-kube-api-access-lxqkq\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.256356 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0e5-account-create-update-fbpdd" event={"ID":"8d129a26-451d-4572-8161-18fc1c5be7dd","Type":"ContainerDied","Data":"f52287feda6a1c267d85884153c2d7f0fd66261e3081b65678c6b505ad774e4d"}
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.256430 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f52287feda6a1c267d85884153c2d7f0fd66261e3081b65678c6b505ad774e4d"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.256399 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b0e5-account-create-update-fbpdd"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.258049 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-64pj4" event={"ID":"ca121447-f823-4aa7-b9ad-d3d8bd7d92bc","Type":"ContainerDied","Data":"062da893ba95bb07573aa0b27447dbc7fbdd215e1156cdd934546c82f030ac9b"}
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.258093 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="062da893ba95bb07573aa0b27447dbc7fbdd215e1156cdd934546c82f030ac9b"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.258106 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-64pj4"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.261184 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jjkj2" event={"ID":"0d88d8c5-1bd2-4c14-9fe0-516455a79891","Type":"ContainerDied","Data":"5620810d4fe3fb61c29b53de07f8295e57b050a394576173bc1ffc553bf86245"}
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.261240 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5620810d4fe3fb61c29b53de07f8295e57b050a394576173bc1ffc553bf86245"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.261326 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jjkj2"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.265127 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa53-account-create-update-pv9jl" event={"ID":"985212cb-7a46-4ade-b877-cb67fd5ebf66","Type":"ContainerDied","Data":"50dc7111dce5eed92d8ed57f76628e5f9b7fa7315db5d46bfd701d6820474d2b"}
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.265985 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50dc7111dce5eed92d8ed57f76628e5f9b7fa7315db5d46bfd701d6820474d2b"
Nov 25 18:31:12 crc kubenswrapper[4926]: I1125 18:31:12.265271 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fa53-account-create-update-pv9jl"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.192603 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.205488 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.301357 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.777953 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-4wq2h"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.785454 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-586d-account-create-update-wqnvl"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.801080 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.806902 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.837795 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd5k8\" (UniqueName: \"kubernetes.io/projected/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-kube-api-access-qd5k8\") pod \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.837889 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb1ce46-a836-47f2-b91a-99161a7e66cd-operator-scripts\") pod \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.837909 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-operator-scripts\") pod \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\" (UID: \"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838006 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c8d247f-750f-432d-8c0e-8e5c87cca18e-operator-scripts\") pod \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838068 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9j55\" (UniqueName: \"kubernetes.io/projected/3c8d247f-750f-432d-8c0e-8e5c87cca18e-kube-api-access-b9j55\") pod \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\" (UID: \"3c8d247f-750f-432d-8c0e-8e5c87cca18e\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838116 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dh42k\" (UniqueName: \"kubernetes.io/projected/6eb1ce46-a836-47f2-b91a-99161a7e66cd-kube-api-access-dh42k\") pod \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\" (UID: \"6eb1ce46-a836-47f2-b91a-99161a7e66cd\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838150 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-operator-scripts\") pod \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838190 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xxtl\" (UniqueName: \"kubernetes.io/projected/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-kube-api-access-4xxtl\") pod \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\" (UID: \"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2\") "
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838508 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c8d247f-750f-432d-8c0e-8e5c87cca18e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c8d247f-750f-432d-8c0e-8e5c87cca18e" (UID: "3c8d247f-750f-432d-8c0e-8e5c87cca18e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838552 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" (UID: "ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.838975 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb1ce46-a836-47f2-b91a-99161a7e66cd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6eb1ce46-a836-47f2-b91a-99161a7e66cd" (UID: "6eb1ce46-a836-47f2-b91a-99161a7e66cd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.839193 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" (UID: "5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.844843 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-kube-api-access-qd5k8" (OuterVolumeSpecName: "kube-api-access-qd5k8") pod "ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" (UID: "ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6"). InnerVolumeSpecName "kube-api-access-qd5k8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.847844 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eb1ce46-a836-47f2-b91a-99161a7e66cd-kube-api-access-dh42k" (OuterVolumeSpecName: "kube-api-access-dh42k") pod "6eb1ce46-a836-47f2-b91a-99161a7e66cd" (UID: "6eb1ce46-a836-47f2-b91a-99161a7e66cd"). InnerVolumeSpecName "kube-api-access-dh42k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.849280 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-kube-api-access-4xxtl" (OuterVolumeSpecName: "kube-api-access-4xxtl") pod "5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" (UID: "5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2"). InnerVolumeSpecName "kube-api-access-4xxtl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.859713 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c8d247f-750f-432d-8c0e-8e5c87cca18e-kube-api-access-b9j55" (OuterVolumeSpecName: "kube-api-access-b9j55") pod "3c8d247f-750f-432d-8c0e-8e5c87cca18e" (UID: "3c8d247f-750f-432d-8c0e-8e5c87cca18e"). InnerVolumeSpecName "kube-api-access-b9j55". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941749 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c8d247f-750f-432d-8c0e-8e5c87cca18e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941787 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9j55\" (UniqueName: \"kubernetes.io/projected/3c8d247f-750f-432d-8c0e-8e5c87cca18e-kube-api-access-b9j55\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941797 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dh42k\" (UniqueName: \"kubernetes.io/projected/6eb1ce46-a836-47f2-b91a-99161a7e66cd-kube-api-access-dh42k\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941806 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941815 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xxtl\" (UniqueName: \"kubernetes.io/projected/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2-kube-api-access-4xxtl\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941824 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd5k8\" (UniqueName: \"kubernetes.io/projected/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-kube-api-access-qd5k8\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941832 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:15 crc kubenswrapper[4926]: I1125 18:31:15.941840 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb1ce46-a836-47f2-b91a-99161a7e66cd-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.311558 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-4wq2h" event={"ID":"3c8d247f-750f-432d-8c0e-8e5c87cca18e","Type":"ContainerDied","Data":"726945a7533403c167ae21fca1a09bc36f082a464eec45b7c3034ed000b1e16f"}
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.311623 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="726945a7533403c167ae21fca1a09bc36f082a464eec45b7c3034ed000b1e16f"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.311721 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-4wq2h"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.331771 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jncjg"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.334435 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-586d-account-create-update-wqnvl"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.338322 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9639-account-create-update-l4sqt"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.385319 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-jncjg" event={"ID":"ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6","Type":"ContainerDied","Data":"c928f41379175667f775a9bdaef3ae1cd171b337fb9743ac9dc01c1083a1e438"}
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.385388 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c928f41379175667f775a9bdaef3ae1cd171b337fb9743ac9dc01c1083a1e438"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.385416 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-586d-account-create-update-wqnvl" event={"ID":"5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2","Type":"ContainerDied","Data":"9cf706c13c0d399e94ac126ae4ce1f732439f67665e6be630f7df17c7e4090e6"}
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.385800 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cf706c13c0d399e94ac126ae4ce1f732439f67665e6be630f7df17c7e4090e6"
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.385828 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9639-account-create-update-l4sqt" event={"ID":"6eb1ce46-a836-47f2-b91a-99161a7e66cd","Type":"ContainerDied","Data":"b86d1db5e62dbb9a6010da1aa6f35be4efb543ef9edf9e54c1057401c23187f9"}
Nov 25 18:31:16 crc kubenswrapper[4926]: I1125 18:31:16.385846 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b86d1db5e62dbb9a6010da1aa6f35be4efb543ef9edf9e54c1057401c23187f9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.108508 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-wvbc9"]
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.109922 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d88d8c5-1bd2-4c14-9fe0-516455a79891" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.109942 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d88d8c5-1bd2-4c14-9fe0-516455a79891" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.109961 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.109968 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.109981 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="985212cb-7a46-4ade-b877-cb67fd5ebf66" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.109989 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="985212cb-7a46-4ade-b877-cb67fd5ebf66" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.109997 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb1ce46-a836-47f2-b91a-99161a7e66cd" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110004 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb1ce46-a836-47f2-b91a-99161a7e66cd" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.110025 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c8d247f-750f-432d-8c0e-8e5c87cca18e" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110032 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c8d247f-750f-432d-8c0e-8e5c87cca18e" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.110050 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110058 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.110073 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110079 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: E1125 18:31:19.110094 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d129a26-451d-4572-8161-18fc1c5be7dd" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110101 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d129a26-451d-4572-8161-18fc1c5be7dd" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110293 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb1ce46-a836-47f2-b91a-99161a7e66cd" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110311 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d88d8c5-1bd2-4c14-9fe0-516455a79891" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110323 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="985212cb-7a46-4ade-b877-cb67fd5ebf66" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110337 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110346 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110359 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110396 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c8d247f-750f-432d-8c0e-8e5c87cca18e" containerName="mariadb-database-create"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.110414 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d129a26-451d-4572-8161-18fc1c5be7dd" containerName="mariadb-account-create-update"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.111245 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.129519 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wvbc9"]
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.156937 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mzhrc"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.157212 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.211598 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-config-data\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.211731 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48rgt\" (UniqueName: \"kubernetes.io/projected/e56a5d23-9046-44fb-b484-19e044ee5ab7-kube-api-access-48rgt\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.211818 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-combined-ca-bundle\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.211847 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-db-sync-config-data\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.314050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-combined-ca-bundle\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.314105 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-db-sync-config-data\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.314185 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-config-data\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.314243 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48rgt\" (UniqueName: \"kubernetes.io/projected/e56a5d23-9046-44fb-b484-19e044ee5ab7-kube-api-access-48rgt\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.320570 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-combined-ca-bundle\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.324805 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-config-data\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.324823 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-db-sync-config-data\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.343466 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48rgt\" (UniqueName: \"kubernetes.io/projected/e56a5d23-9046-44fb-b484-19e044ee5ab7-kube-api-access-48rgt\") pod \"glance-db-sync-wvbc9\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:19 crc kubenswrapper[4926]: I1125 18:31:19.478123 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wvbc9"
Nov 25 18:31:22 crc kubenswrapper[4926]: I1125 18:31:22.205817 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wvbc9"]
Nov 25 18:31:22 crc kubenswrapper[4926]: W1125 18:31:22.205888 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode56a5d23_9046_44fb_b484_19e044ee5ab7.slice/crio-a31b7d2eb8d8bdc000229bdcbb6b93708eff83a4af68852b7393403804b87216 WatchSource:0}: Error finding container a31b7d2eb8d8bdc000229bdcbb6b93708eff83a4af68852b7393403804b87216: Status 404 returned error can't find the container with id a31b7d2eb8d8bdc000229bdcbb6b93708eff83a4af68852b7393403804b87216
Nov 25 18:31:22 crc kubenswrapper[4926]: I1125 18:31:22.408237 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-zdqbf" event={"ID":"f9865625-980a-4b3e-bb1e-53d5223db907","Type":"ContainerStarted","Data":"5db9a216fd2d187084d817e9246e63b11e0fb7838f40d5e4d3733590a9bf4e1f"}
Nov 25 18:31:22 crc kubenswrapper[4926]: I1125 18:31:22.411248 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wvbc9" event={"ID":"e56a5d23-9046-44fb-b484-19e044ee5ab7","Type":"ContainerStarted","Data":"a31b7d2eb8d8bdc000229bdcbb6b93708eff83a4af68852b7393403804b87216"}
Nov 25 18:31:22 crc kubenswrapper[4926]: I1125 18:31:22.412866 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gczcd" event={"ID":"90a8c684-648e-4486-8da0-ff997b994626","Type":"ContainerStarted","Data":"dd2bd06ac63740b7822ef7b91ae4f33fb58241c0f64ff881d8b8d807092bb571"}
Nov 25 18:31:22 crc kubenswrapper[4926]: I1125 18:31:22.431703 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-zdqbf" podStartSLOduration=2.904582182 podStartE2EDuration="14.431682474s" podCreationTimestamp="2025-11-25 18:31:08 +0000 UTC" firstStartedPulling="2025-11-25 18:31:09.943102916 +0000 UTC m=+1100.328616521" lastFinishedPulling="2025-11-25 18:31:21.470203208 +0000 UTC m=+1111.855716813" observedRunningTime="2025-11-25 18:31:22.428000061 +0000 UTC m=+1112.813513666" watchObservedRunningTime="2025-11-25 18:31:22.431682474 +0000 UTC m=+1112.817196079"
Nov 25 18:31:22 crc kubenswrapper[4926]: I1125 18:31:22.467464 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-gczcd" podStartSLOduration=2.663236512 podStartE2EDuration="15.467434923s" podCreationTimestamp="2025-11-25 18:31:07 +0000 UTC" firstStartedPulling="2025-11-25 18:31:08.632263039 +0000 UTC m=+1099.017776634" lastFinishedPulling="2025-11-25 18:31:21.43646143 +0000 UTC m=+1111.821975045" observedRunningTime="2025-11-25 18:31:22.455308555 +0000 UTC m=+1112.840822160" watchObservedRunningTime="2025-11-25 18:31:22.467434923 +0000 UTC m=+1112.852948518"
Nov 25 18:31:26 crc kubenswrapper[4926]: I1125 18:31:26.455656 4926 generic.go:334] "Generic (PLEG): container finished" podID="f9865625-980a-4b3e-bb1e-53d5223db907" containerID="5db9a216fd2d187084d817e9246e63b11e0fb7838f40d5e4d3733590a9bf4e1f" exitCode=0
Nov 25 18:31:26 crc kubenswrapper[4926]: I1125 18:31:26.455867 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-zdqbf" event={"ID":"f9865625-980a-4b3e-bb1e-53d5223db907","Type":"ContainerDied","Data":"5db9a216fd2d187084d817e9246e63b11e0fb7838f40d5e4d3733590a9bf4e1f"}
Nov 25 18:31:26 crc kubenswrapper[4926]: I1125 18:31:26.459517 4926 generic.go:334] "Generic (PLEG): container finished" podID="90a8c684-648e-4486-8da0-ff997b994626" containerID="dd2bd06ac63740b7822ef7b91ae4f33fb58241c0f64ff881d8b8d807092bb571" exitCode=0
Nov 25 18:31:26 crc kubenswrapper[4926]: I1125 18:31:26.459580 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gczcd" event={"ID":"90a8c684-648e-4486-8da0-ff997b994626","Type":"ContainerDied","Data":"dd2bd06ac63740b7822ef7b91ae4f33fb58241c0f64ff881d8b8d807092bb571"}
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.542027 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.543464 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.601757 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gczcd"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.609833 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-zdqbf"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.700206 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-zdqbf" event={"ID":"f9865625-980a-4b3e-bb1e-53d5223db907","Type":"ContainerDied","Data":"961e92e91defdfd23ebc464e4e9ba8936cc0a802ed865c5ed1ce86b62cca8bbd"}
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.700282 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="961e92e91defdfd23ebc464e4e9ba8936cc0a802ed865c5ed1ce86b62cca8bbd"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.700283 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-zdqbf"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.702559 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gczcd" event={"ID":"90a8c684-648e-4486-8da0-ff997b994626","Type":"ContainerDied","Data":"1d66dc056e7c228add5f3d35a4323c1e6413d778b7f7c63832c0ce983e5e7b3f"}
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.702584 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d66dc056e7c228add5f3d35a4323c1e6413d778b7f7c63832c0ce983e5e7b3f"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.702643 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gczcd"
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.713588 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-combined-ca-bundle\") pod \"90a8c684-648e-4486-8da0-ff997b994626\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.713714 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-config-data\") pod \"90a8c684-648e-4486-8da0-ff997b994626\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.713746 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-combined-ca-bundle\") pod \"f9865625-980a-4b3e-bb1e-53d5223db907\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.713821 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24p8d\" (UniqueName: \"kubernetes.io/projected/90a8c684-648e-4486-8da0-ff997b994626-kube-api-access-24p8d\") pod \"90a8c684-648e-4486-8da0-ff997b994626\" (UID: \"90a8c684-648e-4486-8da0-ff997b994626\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.713919 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-db-sync-config-data\") pod \"f9865625-980a-4b3e-bb1e-53d5223db907\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.713948 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-config-data\") pod \"f9865625-980a-4b3e-bb1e-53d5223db907\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.714048 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xfdl\" (UniqueName: \"kubernetes.io/projected/f9865625-980a-4b3e-bb1e-53d5223db907-kube-api-access-8xfdl\") pod \"f9865625-980a-4b3e-bb1e-53d5223db907\" (UID: \"f9865625-980a-4b3e-bb1e-53d5223db907\") "
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.719188 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f9865625-980a-4b3e-bb1e-53d5223db907" (UID: "f9865625-980a-4b3e-bb1e-53d5223db907"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.719245 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9865625-980a-4b3e-bb1e-53d5223db907-kube-api-access-8xfdl" (OuterVolumeSpecName: "kube-api-access-8xfdl") pod "f9865625-980a-4b3e-bb1e-53d5223db907" (UID: "f9865625-980a-4b3e-bb1e-53d5223db907"). InnerVolumeSpecName "kube-api-access-8xfdl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.721490 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a8c684-648e-4486-8da0-ff997b994626-kube-api-access-24p8d" (OuterVolumeSpecName: "kube-api-access-24p8d") pod "90a8c684-648e-4486-8da0-ff997b994626" (UID: "90a8c684-648e-4486-8da0-ff997b994626"). InnerVolumeSpecName "kube-api-access-24p8d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.743949 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9865625-980a-4b3e-bb1e-53d5223db907" (UID: "f9865625-980a-4b3e-bb1e-53d5223db907"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.746158 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90a8c684-648e-4486-8da0-ff997b994626" (UID: "90a8c684-648e-4486-8da0-ff997b994626"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.768304 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-config-data" (OuterVolumeSpecName: "config-data") pod "90a8c684-648e-4486-8da0-ff997b994626" (UID: "90a8c684-648e-4486-8da0-ff997b994626"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.774111 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-config-data" (OuterVolumeSpecName: "config-data") pod "f9865625-980a-4b3e-bb1e-53d5223db907" (UID: "f9865625-980a-4b3e-bb1e-53d5223db907"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815683 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815732 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815746 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24p8d\" (UniqueName: \"kubernetes.io/projected/90a8c684-648e-4486-8da0-ff997b994626-kube-api-access-24p8d\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815757 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815765 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9865625-980a-4b3e-bb1e-53d5223db907-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815773 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xfdl\" (UniqueName: \"kubernetes.io/projected/f9865625-980a-4b3e-bb1e-53d5223db907-kube-api-access-8xfdl\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:33 crc kubenswrapper[4926]: I1125 18:31:33.815782 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90a8c684-648e-4486-8da0-ff997b994626-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.716126 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wvbc9" event={"ID":"e56a5d23-9046-44fb-b484-19e044ee5ab7","Type":"ContainerStarted","Data":"edb0ac343c5bfb9b8d22be56c5801ed4c081200df785481f097931f0dd999f3c"}
Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.745716 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-wvbc9" podStartSLOduration=4.493991914 podStartE2EDuration="15.745692581s" podCreationTimestamp="2025-11-25 18:31:19 +0000 UTC" firstStartedPulling="2025-11-25 18:31:22.209763977 +0000 UTC m=+1112.595277622" lastFinishedPulling="2025-11-25 18:31:33.461464684 +0000 UTC m=+1123.846978289" observedRunningTime="2025-11-25 18:31:34.734174748 +0000 UTC m=+1125.119688393" watchObservedRunningTime="2025-11-25 18:31:34.745692581 +0000 UTC m=+1125.131206206"
Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.929621 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7z5gz"]
Nov 25 18:31:34 crc kubenswrapper[4926]: E1125 18:31:34.930063 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90a8c684-648e-4486-8da0-ff997b994626" containerName="keystone-db-sync"
Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.930086 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="90a8c684-648e-4486-8da0-ff997b994626" containerName="keystone-db-sync"
Nov 25 18:31:34 crc kubenswrapper[4926]: E1125 18:31:34.930113 4926 cpu_manager.go:410] "RemoveStaleState: removing
container" podUID="f9865625-980a-4b3e-bb1e-53d5223db907" containerName="watcher-db-sync" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.930121 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9865625-980a-4b3e-bb1e-53d5223db907" containerName="watcher-db-sync" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.930274 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="90a8c684-648e-4486-8da0-ff997b994626" containerName="keystone-db-sync" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.930300 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9865625-980a-4b3e-bb1e-53d5223db907" containerName="watcher-db-sync" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.930984 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.934578 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.935058 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.936184 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2fxt6" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.937897 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.938242 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 18:31:34 crc kubenswrapper[4926]: I1125 18:31:34.971289 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7z5gz"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.011836 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56bcc7bf8c-k4ttx"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.020781 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.040683 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-credential-keys\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.040754 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtcbh\" (UniqueName: \"kubernetes.io/projected/3305e621-41e4-4b90-b63d-72dceab97c95-kube-api-access-vtcbh\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.040895 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-config-data\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.040972 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-combined-ca-bundle\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.041191 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-fernet-keys\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.041320 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-scripts\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.049992 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56bcc7bf8c-k4ttx"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.125655 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.126871 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.129342 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-fn7fb" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.129443 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.143620 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-swift-storage-0\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.143671 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-nb\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.143716 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-credential-keys\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.143857 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-sb\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.143920 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtcbh\" (UniqueName: \"kubernetes.io/projected/3305e621-41e4-4b90-b63d-72dceab97c95-kube-api-access-vtcbh\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.143954 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlplh\" (UniqueName: \"kubernetes.io/projected/59f8625b-c52e-4541-abc3-caa41dea86b3-kube-api-access-nlplh\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144011 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-config-data\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-combined-ca-bundle\") pod \"keystone-bootstrap-7z5gz\" (UID: 
\"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144127 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-config\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144200 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-svc\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144260 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-fernet-keys\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144331 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-scripts\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.144925 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.147008 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.155586 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-config-data\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.157933 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-scripts\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.163610 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.164824 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-credential-keys\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.166112 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-fernet-keys\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.180123 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-combined-ca-bundle\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.188018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtcbh\" (UniqueName: \"kubernetes.io/projected/3305e621-41e4-4b90-b63d-72dceab97c95-kube-api-access-vtcbh\") pod \"keystone-bootstrap-7z5gz\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.201108 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.248045 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.249875 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cf27ee2-c953-4788-8738-5359f186de3d-logs\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.249944 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-config-data\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.249978 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-swift-storage-0\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250000 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-nb\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250031 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4djfn\" (UniqueName: \"kubernetes.io/projected/2cf27ee2-c953-4788-8738-5359f186de3d-kube-api-access-4djfn\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250058 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skk2n\" (UniqueName: \"kubernetes.io/projected/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-kube-api-access-skk2n\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250078 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-logs\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250107 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-sb\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250128 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250158 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlplh\" (UniqueName: \"kubernetes.io/projected/59f8625b-c52e-4541-abc3-caa41dea86b3-kube-api-access-nlplh\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250186 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-config-data\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 
18:31:35.250206 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250227 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250256 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250288 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-config\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.250326 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-svc\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.251262 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-svc\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.252178 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-sb\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.253667 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-swift-storage-0\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.260759 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-config\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.261152 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.332755 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-nb\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.358316 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlplh\" (UniqueName: \"kubernetes.io/projected/59f8625b-c52e-4541-abc3-caa41dea86b3-kube-api-access-nlplh\") pod \"dnsmasq-dns-56bcc7bf8c-k4ttx\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.360156 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cf27ee2-c953-4788-8738-5359f186de3d-logs\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.371368 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-config-data\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.371638 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4djfn\" (UniqueName: \"kubernetes.io/projected/2cf27ee2-c953-4788-8738-5359f186de3d-kube-api-access-4djfn\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.371740 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skk2n\" (UniqueName: \"kubernetes.io/projected/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-kube-api-access-skk2n\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.371806 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-logs\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.371895 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.372003 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-config-data\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.372167 4926 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.372247 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.372341 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.374932 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cf27ee2-c953-4788-8738-5359f186de3d-logs\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.375984 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-logs\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.391812 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-config-data\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.424498 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.424887 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.426733 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.426759 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 
18:31:35.429286 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-config-data\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.502450 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.503808 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.525314 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4djfn\" (UniqueName: \"kubernetes.io/projected/2cf27ee2-c953-4788-8738-5359f186de3d-kube-api-access-4djfn\") pod \"watcher-api-0\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.525889 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.540033 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skk2n\" (UniqueName: \"kubernetes.io/projected/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-kube-api-access-skk2n\") pod \"watcher-decision-engine-0\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.573861 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.598467 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-stgf4"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.599965 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.610887 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.617959 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.643889 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.644533 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2sclw" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.644663 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.645833 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.645870 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.647259 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.651527 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-jnbt6"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.669023 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.673625 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-586d54b5d5-wchlr"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.675327 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.696247 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.696712 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67831a6a-38bf-4f8c-98a8-c8cb0274e218-logs\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.696840 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbhsm\" (UniqueName: \"kubernetes.io/projected/67831a6a-38bf-4f8c-98a8-c8cb0274e218-kube-api-access-nbhsm\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.697326 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-config-data\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.723206 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-stgf4"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.753917 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.754158 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.754274 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-69jjt" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.755815 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.755970 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-28tsb" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.756062 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.756094 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"ceilometer-scripts" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.761169 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.761494 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.782574 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799315 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-config-data\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799419 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-config-data\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799444 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-combined-ca-bundle\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799486 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-scripts\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799507 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-db-sync-config-data\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799524 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54l8z\" (UniqueName: \"kubernetes.io/projected/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-kube-api-access-54l8z\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799572 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-scripts\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799597 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-logs\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " 
pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799616 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-config-data\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799653 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799673 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-log-httpd\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799748 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-run-httpd\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0138f286-e018-42de-b145-2cda09144394-etc-machine-id\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799813 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67831a6a-38bf-4f8c-98a8-c8cb0274e218-logs\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799831 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbhsm\" (UniqueName: \"kubernetes.io/projected/67831a6a-38bf-4f8c-98a8-c8cb0274e218-kube-api-access-nbhsm\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799849 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.799869 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gw92\" (UniqueName: \"kubernetes.io/projected/102b4780-5da7-4b86-9679-e87417b4ee5a-kube-api-access-9gw92\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.800252 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-config\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.800277 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-config-data\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.800313 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-combined-ca-bundle\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.800332 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpc52\" (UniqueName: \"kubernetes.io/projected/0138f286-e018-42de-b145-2cda09144394-kube-api-access-lpc52\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.804725 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-horizon-secret-key\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.804812 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.804887 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-scripts\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.804964 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwx25\" (UniqueName: \"kubernetes.io/projected/434bea04-3768-493f-8d01-36f9c41bc811-kube-api-access-kwx25\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.812560 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67831a6a-38bf-4f8c-98a8-c8cb0274e218-logs\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.840498 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-586d54b5d5-wchlr"] Nov 25 
18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.846478 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-config-data\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.847495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.862023 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jnbt6"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.911478 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-config-data\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.911751 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-combined-ca-bundle\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912107 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-scripts\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912148 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-db-sync-config-data\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912190 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54l8z\" (UniqueName: \"kubernetes.io/projected/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-kube-api-access-54l8z\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912220 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-scripts\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912714 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-logs\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912777 4926 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-config-data\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912812 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-log-httpd\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912940 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-run-httpd\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.912970 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0138f286-e018-42de-b145-2cda09144394-etc-machine-id\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913120 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913271 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gw92\" (UniqueName: \"kubernetes.io/projected/102b4780-5da7-4b86-9679-e87417b4ee5a-kube-api-access-9gw92\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913308 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-config\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913331 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-config-data\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913363 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-combined-ca-bundle\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913398 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpc52\" (UniqueName: \"kubernetes.io/projected/0138f286-e018-42de-b145-2cda09144394-kube-api-access-lpc52\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc 
kubenswrapper[4926]: I1125 18:31:35.913451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-horizon-secret-key\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913477 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913516 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-scripts\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.913556 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwx25\" (UniqueName: \"kubernetes.io/projected/434bea04-3768-493f-8d01-36f9c41bc811-kube-api-access-kwx25\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.924952 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-config\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.930767 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbhsm\" (UniqueName: \"kubernetes.io/projected/67831a6a-38bf-4f8c-98a8-c8cb0274e218-kube-api-access-nbhsm\") pod \"watcher-applier-0\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.931071 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-log-httpd\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.931595 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0138f286-e018-42de-b145-2cda09144394-etc-machine-id\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.933933 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-run-httpd\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.939250 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-scripts\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " 
pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.940601 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-combined-ca-bundle\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.941408 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-scripts\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.941613 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.951684 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-logs\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.955256 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-config-data\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.957043 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-db-sync-config-data\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.960263 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.960335 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-pvqdw"] Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.961588 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.963345 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54l8z\" (UniqueName: \"kubernetes.io/projected/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-kube-api-access-54l8z\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.973130 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-horizon-secret-key\") pod \"horizon-586d54b5d5-wchlr\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.980157 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.982662 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-config-data\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.982934 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-config-data\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.982996 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-scripts\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.985999 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.989629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gw92\" (UniqueName: \"kubernetes.io/projected/102b4780-5da7-4b86-9679-e87417b4ee5a-kube-api-access-9gw92\") pod \"ceilometer-0\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " pod="openstack/ceilometer-0" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.988079 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-combined-ca-bundle\") pod \"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.989816 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 18:31:35 crc kubenswrapper[4926]: I1125 18:31:35.991849 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wzmlf" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.012126 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpc52\" (UniqueName: \"kubernetes.io/projected/0138f286-e018-42de-b145-2cda09144394-kube-api-access-lpc52\") pod 
\"cinder-db-sync-jnbt6\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.012235 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-pvqdw"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.036900 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwx25\" (UniqueName: \"kubernetes.io/projected/434bea04-3768-493f-8d01-36f9c41bc811-kube-api-access-kwx25\") pod \"neutron-db-sync-stgf4\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.046799 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-65d76cdbdf-vktzr"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.048550 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.074476 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-stgf4" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.093924 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-x8d9g"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.095828 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.099436 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-44z7v" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.101607 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.108859 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x8d9g"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140458 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf2tn\" (UniqueName: \"kubernetes.io/projected/906ba89b-3a3b-4823-9f86-b7c9664277cd-kube-api-access-gf2tn\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140518 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-config-data\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140551 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8d9t\" (UniqueName: \"kubernetes.io/projected/4addbd96-e765-4c9c-b260-5a80700849d2-kube-api-access-n8d9t\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140580 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906ba89b-3a3b-4823-9f86-b7c9664277cd-logs\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " 
pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140631 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-scripts\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140741 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-combined-ca-bundle\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140840 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-config-data\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140893 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-scripts\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.140969 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4addbd96-e765-4c9c-b260-5a80700849d2-logs\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.141071 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/906ba89b-3a3b-4823-9f86-b7c9664277cd-horizon-secret-key\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.153343 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.174030 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65d76cdbdf-vktzr"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.185504 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bcc7bf8c-k4ttx"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.191954 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.213128 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6489b9cb77-sskkb"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.217413 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.227354 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6489b9cb77-sskkb"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.228057 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.242814 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf2tn\" (UniqueName: \"kubernetes.io/projected/906ba89b-3a3b-4823-9f86-b7c9664277cd-kube-api-access-gf2tn\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243252 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-config-data\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243280 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8d9t\" (UniqueName: \"kubernetes.io/projected/4addbd96-e765-4c9c-b260-5a80700849d2-kube-api-access-n8d9t\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243302 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906ba89b-3a3b-4823-9f86-b7c9664277cd-logs\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243325 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-scripts\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243355 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-combined-ca-bundle\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243590 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-config-data\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243609 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-scripts\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243643 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-db-sync-config-data\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243659 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4addbd96-e765-4c9c-b260-5a80700849d2-logs\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243745 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/906ba89b-3a3b-4823-9f86-b7c9664277cd-horizon-secret-key\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243792 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-combined-ca-bundle\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.243826 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr9c7\" (UniqueName: \"kubernetes.io/projected/017caf97-9f18-49b8-b6e6-597c709e3420-kube-api-access-sr9c7\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.246998 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906ba89b-3a3b-4823-9f86-b7c9664277cd-logs\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.248216 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4addbd96-e765-4c9c-b260-5a80700849d2-logs\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.251477 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-scripts\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.251483 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-config-data\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.256019 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-scripts\") pod 
\"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.257008 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/906ba89b-3a3b-4823-9f86-b7c9664277cd-horizon-secret-key\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.257187 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-combined-ca-bundle\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.257541 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-config-data\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.266502 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf2tn\" (UniqueName: \"kubernetes.io/projected/906ba89b-3a3b-4823-9f86-b7c9664277cd-kube-api-access-gf2tn\") pod \"horizon-65d76cdbdf-vktzr\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.271534 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8d9t\" (UniqueName: \"kubernetes.io/projected/4addbd96-e765-4c9c-b260-5a80700849d2-kube-api-access-n8d9t\") pod \"placement-db-sync-pvqdw\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347271 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr9c7\" (UniqueName: \"kubernetes.io/projected/017caf97-9f18-49b8-b6e6-597c709e3420-kube-api-access-sr9c7\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347339 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-nb\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347401 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lk9d\" (UniqueName: \"kubernetes.io/projected/3f8be116-3710-4f26-bde2-348f96675a2c-kube-api-access-4lk9d\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347424 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-swift-storage-0\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" 
(UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347463 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-sb\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347487 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-db-sync-config-data\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347506 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-svc\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347546 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-config\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.347580 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-combined-ca-bundle\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.368229 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-combined-ca-bundle\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.368246 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-db-sync-config-data\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.405016 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-pvqdw" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.405337 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7z5gz"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.450086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-config\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.450280 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-nb\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.450342 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lk9d\" (UniqueName: \"kubernetes.io/projected/3f8be116-3710-4f26-bde2-348f96675a2c-kube-api-access-4lk9d\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.450363 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-swift-storage-0\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.450433 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-sb\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.450462 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-svc\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.452052 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-config\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.457564 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.458284 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr9c7\" (UniqueName: \"kubernetes.io/projected/017caf97-9f18-49b8-b6e6-597c709e3420-kube-api-access-sr9c7\") pod \"barbican-db-sync-x8d9g\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.459107 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-nb\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.459349 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-swift-storage-0\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.460175 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-sb\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.463007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-svc\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.467937 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.479779 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lk9d\" (UniqueName: \"kubernetes.io/projected/3f8be116-3710-4f26-bde2-348f96675a2c-kube-api-access-4lk9d\") pod \"dnsmasq-dns-6489b9cb77-sskkb\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.482302 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.512714 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.548025 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.772990 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9","Type":"ContainerStarted","Data":"390cc830c2f770a5cb6c44a7a432ab3f8df85a276ac91d2ad1130768c748eb82"} Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.777330 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7z5gz" event={"ID":"3305e621-41e4-4b90-b63d-72dceab97c95","Type":"ContainerStarted","Data":"d79cfcd0c21c9c060df668353ce36c65d533e4dd27a0859191931bd47e44f11a"} Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.778735 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2cf27ee2-c953-4788-8738-5359f186de3d","Type":"ContainerStarted","Data":"b29328dc7e142b9d789f40dec47baeb25e5e7b6069b8446586e90c6dcbd7f1aa"} Nov 25 18:31:36 crc kubenswrapper[4926]: I1125 18:31:36.991923 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bcc7bf8c-k4ttx"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.082918 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-stgf4"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.133389 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.291967 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-pvqdw"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.317437 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jnbt6"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.331269 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-586d54b5d5-wchlr"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.350138 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.576627 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65d76cdbdf-vktzr"] Nov 25 18:31:37 crc kubenswrapper[4926]: W1125 18:31:37.600658 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod906ba89b_3a3b_4823_9f86_b7c9664277cd.slice/crio-c3414176a5cb1e06b2cb1144753c8f4c31692cbce99231da36db67872a2c0ee8 WatchSource:0}: Error finding container c3414176a5cb1e06b2cb1144753c8f4c31692cbce99231da36db67872a2c0ee8: Status 404 returned error can't find the container with id c3414176a5cb1e06b2cb1144753c8f4c31692cbce99231da36db67872a2c0ee8 Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.644937 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x8d9g"] Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.655641 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6489b9cb77-sskkb"] Nov 25 18:31:37 crc kubenswrapper[4926]: W1125 18:31:37.665121 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod017caf97_9f18_49b8_b6e6_597c709e3420.slice/crio-efd81a38d1e53937f887af6457db861759d9a6a1992037d40de2684ed56bd9f0 WatchSource:0}: Error finding container efd81a38d1e53937f887af6457db861759d9a6a1992037d40de2684ed56bd9f0: Status 404 returned 
error can't find the container with id efd81a38d1e53937f887af6457db861759d9a6a1992037d40de2684ed56bd9f0 Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.806135 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-stgf4" event={"ID":"434bea04-3768-493f-8d01-36f9c41bc811","Type":"ContainerStarted","Data":"bd08fcae9f750343c0ae3eb8c74cc250d0588c9bced80844a18fa4bf4c809509"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.820052 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"67831a6a-38bf-4f8c-98a8-c8cb0274e218","Type":"ContainerStarted","Data":"d02d55465204343c01ee7a14aaec70e612220e748441d6f1c435527067817635"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.823406 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7z5gz" event={"ID":"3305e621-41e4-4b90-b63d-72dceab97c95","Type":"ContainerStarted","Data":"90e3e242ecf190558022eeac560a84bddae81d66ba85e65e69044613f25474fd"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.828694 4926 generic.go:334] "Generic (PLEG): container finished" podID="59f8625b-c52e-4541-abc3-caa41dea86b3" containerID="f0319581c13ed7cd7b3c3d5184d9de528bc579c34e880eeb587a5af0ccd9bd74" exitCode=0 Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.828903 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" event={"ID":"59f8625b-c52e-4541-abc3-caa41dea86b3","Type":"ContainerDied","Data":"f0319581c13ed7cd7b3c3d5184d9de528bc579c34e880eeb587a5af0ccd9bd74"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.828926 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" event={"ID":"59f8625b-c52e-4541-abc3-caa41dea86b3","Type":"ContainerStarted","Data":"1b64cb0f86d462bca35afb9ca4d58f68cef8f3a8e15fb06be71d63bdf836c9cd"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.837342 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerStarted","Data":"8ad478716dbcd108968d441e8c96875dd7521bfb7415051754ed48bbfa0b2aad"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.844908 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jnbt6" event={"ID":"0138f286-e018-42de-b145-2cda09144394","Type":"ContainerStarted","Data":"b8fc94354958fe153cb45b06179b3d522a7b0e0fe5540293447b458e2afcd26c"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.847077 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7z5gz" podStartSLOduration=3.8470551630000003 podStartE2EDuration="3.847055163s" podCreationTimestamp="2025-11-25 18:31:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:37.84461181 +0000 UTC m=+1128.230125425" watchObservedRunningTime="2025-11-25 18:31:37.847055163 +0000 UTC m=+1128.232568758" Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.880025 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pvqdw" event={"ID":"4addbd96-e765-4c9c-b260-5a80700849d2","Type":"ContainerStarted","Data":"88e9957e39cb384b940c1bff5a9b40ae1df9cf0326df966a9b91c7680fd65ff9"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.944100 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/horizon-65d76cdbdf-vktzr" event={"ID":"906ba89b-3a3b-4823-9f86-b7c9664277cd","Type":"ContainerStarted","Data":"c3414176a5cb1e06b2cb1144753c8f4c31692cbce99231da36db67872a2c0ee8"} Nov 25 18:31:37 crc kubenswrapper[4926]: I1125 18:31:37.984739 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-586d54b5d5-wchlr" event={"ID":"a42599e0-4206-482d-8c8f-5d5e4a0e10ba","Type":"ContainerStarted","Data":"cee5a93502e0613148981c72e6fe2b7fdc57785b90b01b8985d0ba915b5e5a4d"} Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.012787 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2cf27ee2-c953-4788-8738-5359f186de3d","Type":"ContainerStarted","Data":"ae7f365180802b10bfc7603c1f7623792af2a90712e0641ca4103520eea1f998"} Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.012842 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2cf27ee2-c953-4788-8738-5359f186de3d","Type":"ContainerStarted","Data":"00b7aea7b91cbbd776bb314783a455b9a5d1db3bfe52f1cadde1d772c2707351"} Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.013306 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.023709 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" event={"ID":"3f8be116-3710-4f26-bde2-348f96675a2c","Type":"ContainerStarted","Data":"dfbc5abc69f35fd3f8ca18dd647f0195e0bd730afec376c2c5a888f0aec3f2e8"} Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.042142 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.042114189 podStartE2EDuration="3.042114189s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:38.030174384 +0000 UTC m=+1128.415687989" watchObservedRunningTime="2025-11-25 18:31:38.042114189 +0000 UTC m=+1128.427627794" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.059616 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x8d9g" event={"ID":"017caf97-9f18-49b8-b6e6-597c709e3420","Type":"ContainerStarted","Data":"efd81a38d1e53937f887af6457db861759d9a6a1992037d40de2684ed56bd9f0"} Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.318507 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.412313 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-586d54b5d5-wchlr"] Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.509874 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7fd689864c-5qb7v"] Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.519232 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.536667 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.555764 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7fd689864c-5qb7v"] Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.581150 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.660690 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-nb\") pod \"59f8625b-c52e-4541-abc3-caa41dea86b3\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661179 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-config\") pod \"59f8625b-c52e-4541-abc3-caa41dea86b3\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661249 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlplh\" (UniqueName: \"kubernetes.io/projected/59f8625b-c52e-4541-abc3-caa41dea86b3-kube-api-access-nlplh\") pod \"59f8625b-c52e-4541-abc3-caa41dea86b3\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661281 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-sb\") pod \"59f8625b-c52e-4541-abc3-caa41dea86b3\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661395 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-svc\") pod \"59f8625b-c52e-4541-abc3-caa41dea86b3\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661452 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-swift-storage-0\") pod \"59f8625b-c52e-4541-abc3-caa41dea86b3\" (UID: \"59f8625b-c52e-4541-abc3-caa41dea86b3\") " Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/166aef77-b73a-497a-886d-a66a548bff2d-horizon-secret-key\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661798 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166aef77-b73a-497a-886d-a66a548bff2d-logs\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661940 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn4gq\" (UniqueName: 
\"kubernetes.io/projected/166aef77-b73a-497a-886d-a66a548bff2d-kube-api-access-fn4gq\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.661965 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-config-data\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.662058 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-scripts\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.680702 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59f8625b-c52e-4541-abc3-caa41dea86b3-kube-api-access-nlplh" (OuterVolumeSpecName: "kube-api-access-nlplh") pod "59f8625b-c52e-4541-abc3-caa41dea86b3" (UID: "59f8625b-c52e-4541-abc3-caa41dea86b3"). InnerVolumeSpecName "kube-api-access-nlplh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.742017 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-config" (OuterVolumeSpecName: "config") pod "59f8625b-c52e-4541-abc3-caa41dea86b3" (UID: "59f8625b-c52e-4541-abc3-caa41dea86b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.743332 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "59f8625b-c52e-4541-abc3-caa41dea86b3" (UID: "59f8625b-c52e-4541-abc3-caa41dea86b3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.753270 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "59f8625b-c52e-4541-abc3-caa41dea86b3" (UID: "59f8625b-c52e-4541-abc3-caa41dea86b3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.757892 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "59f8625b-c52e-4541-abc3-caa41dea86b3" (UID: "59f8625b-c52e-4541-abc3-caa41dea86b3"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764503 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn4gq\" (UniqueName: \"kubernetes.io/projected/166aef77-b73a-497a-886d-a66a548bff2d-kube-api-access-fn4gq\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764567 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-config-data\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764676 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-scripts\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764718 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/166aef77-b73a-497a-886d-a66a548bff2d-horizon-secret-key\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764738 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166aef77-b73a-497a-886d-a66a548bff2d-logs\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764882 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764906 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlplh\" (UniqueName: \"kubernetes.io/projected/59f8625b-c52e-4541-abc3-caa41dea86b3-kube-api-access-nlplh\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764919 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764934 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.764945 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.767858 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-scripts\") pod \"horizon-7fd689864c-5qb7v\" (UID: 
\"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.771592 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-config-data\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.771889 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166aef77-b73a-497a-886d-a66a548bff2d-logs\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.788552 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn4gq\" (UniqueName: \"kubernetes.io/projected/166aef77-b73a-497a-886d-a66a548bff2d-kube-api-access-fn4gq\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.790937 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/166aef77-b73a-497a-886d-a66a548bff2d-horizon-secret-key\") pod \"horizon-7fd689864c-5qb7v\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.822050 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "59f8625b-c52e-4541-abc3-caa41dea86b3" (UID: "59f8625b-c52e-4541-abc3-caa41dea86b3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.856059 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:31:38 crc kubenswrapper[4926]: I1125 18:31:38.867138 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59f8625b-c52e-4541-abc3-caa41dea86b3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.077648 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.077522 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56bcc7bf8c-k4ttx" event={"ID":"59f8625b-c52e-4541-abc3-caa41dea86b3","Type":"ContainerDied","Data":"1b64cb0f86d462bca35afb9ca4d58f68cef8f3a8e15fb06be71d63bdf836c9cd"} Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.077835 4926 scope.go:117] "RemoveContainer" containerID="f0319581c13ed7cd7b3c3d5184d9de528bc579c34e880eeb587a5af0ccd9bd74" Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.080992 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-stgf4" event={"ID":"434bea04-3768-493f-8d01-36f9c41bc811","Type":"ContainerStarted","Data":"c532dd2be7b122443980153da9bb60983ebf9d3d9ac7994e8762e25ecbf02b10"} Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.084617 4926 generic.go:334] "Generic (PLEG): container finished" podID="3f8be116-3710-4f26-bde2-348f96675a2c" containerID="021cbc7140a6306af030a2c0f70e9f62bf1407c823aa68b79b6f07528174129b" exitCode=0 Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.084704 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" event={"ID":"3f8be116-3710-4f26-bde2-348f96675a2c","Type":"ContainerDied","Data":"021cbc7140a6306af030a2c0f70e9f62bf1407c823aa68b79b6f07528174129b"} Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.114687 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-stgf4" podStartSLOduration=4.114655907 podStartE2EDuration="4.114655907s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:39.09393124 +0000 UTC m=+1129.479444845" watchObservedRunningTime="2025-11-25 18:31:39.114655907 +0000 UTC m=+1129.500169502" Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.269453 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bcc7bf8c-k4ttx"] Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.299477 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56bcc7bf8c-k4ttx"] Nov 25 18:31:39 crc kubenswrapper[4926]: I1125 18:31:39.563757 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7fd689864c-5qb7v"] Nov 25 18:31:39 crc kubenswrapper[4926]: W1125 18:31:39.593878 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod166aef77_b73a_497a_886d_a66a548bff2d.slice/crio-a7a0505f647f70070835b00f050a718e0a06aac972d5d775fb30bf26d4938645 WatchSource:0}: Error finding container a7a0505f647f70070835b00f050a718e0a06aac972d5d775fb30bf26d4938645: Status 404 returned error can't find the container with id a7a0505f647f70070835b00f050a718e0a06aac972d5d775fb30bf26d4938645 Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.100892 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd689864c-5qb7v" event={"ID":"166aef77-b73a-497a-886d-a66a548bff2d","Type":"ContainerStarted","Data":"a7a0505f647f70070835b00f050a718e0a06aac972d5d775fb30bf26d4938645"} Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.119286 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" 
event={"ID":"3f8be116-3710-4f26-bde2-348f96675a2c","Type":"ContainerStarted","Data":"04ecc17731a8f7003a6eeaa3ad4ab169febc4fffb25b0ad3ba7b22cc17c10607"} Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.119387 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.119489 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api-log" containerID="cri-o://00b7aea7b91cbbd776bb314783a455b9a5d1db3bfe52f1cadde1d772c2707351" gracePeriod=30 Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.119701 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.119752 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" containerID="cri-o://ae7f365180802b10bfc7603c1f7623792af2a90712e0641ca4103520eea1f998" gracePeriod=30 Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.125428 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": EOF" Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.132519 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": EOF" Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.145022 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" podStartSLOduration=5.144979583 podStartE2EDuration="5.144979583s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:31:40.141847353 +0000 UTC m=+1130.527360958" watchObservedRunningTime="2025-11-25 18:31:40.144979583 +0000 UTC m=+1130.530493188" Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.350851 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59f8625b-c52e-4541-abc3-caa41dea86b3" path="/var/lib/kubelet/pods/59f8625b-c52e-4541-abc3-caa41dea86b3/volumes" Nov 25 18:31:40 crc kubenswrapper[4926]: I1125 18:31:40.647037 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 25 18:31:41 crc kubenswrapper[4926]: I1125 18:31:41.153542 4926 generic.go:334] "Generic (PLEG): container finished" podID="2cf27ee2-c953-4788-8738-5359f186de3d" containerID="00b7aea7b91cbbd776bb314783a455b9a5d1db3bfe52f1cadde1d772c2707351" exitCode=143 Nov 25 18:31:41 crc kubenswrapper[4926]: I1125 18:31:41.153817 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2cf27ee2-c953-4788-8738-5359f186de3d","Type":"ContainerDied","Data":"00b7aea7b91cbbd776bb314783a455b9a5d1db3bfe52f1cadde1d772c2707351"} Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.263086 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get 
\"http://10.217.0.150:9322/\": read tcp 10.217.0.2:54776->10.217.0.150:9322: read: connection reset by peer" Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.938987 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-65d76cdbdf-vktzr"] Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.964770 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-765875bb4b-tr7fm"] Nov 25 18:31:44 crc kubenswrapper[4926]: E1125 18:31:44.965239 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59f8625b-c52e-4541-abc3-caa41dea86b3" containerName="init" Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.965259 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="59f8625b-c52e-4541-abc3-caa41dea86b3" containerName="init" Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.965454 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="59f8625b-c52e-4541-abc3-caa41dea86b3" containerName="init" Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.966575 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:44 crc kubenswrapper[4926]: I1125 18:31:44.990239 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.023710 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-765875bb4b-tr7fm"] Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.054784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-config-data\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.055034 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-tls-certs\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.058376 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k597c\" (UniqueName: \"kubernetes.io/projected/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-kube-api-access-k597c\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.061851 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-logs\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.061937 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-scripts\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.061964 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-combined-ca-bundle\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.062060 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-secret-key\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.078385 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7fd689864c-5qb7v"] Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.088474 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-58ffdb7978-lnv9j"] Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.093308 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58ffdb7978-lnv9j"] Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.093476 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163467 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-config-data\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163523 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-tls-certs\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163563 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k597c\" (UniqueName: \"kubernetes.io/projected/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-kube-api-access-k597c\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163594 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x86rz\" (UniqueName: \"kubernetes.io/projected/f0edd267-7b26-44cc-a576-552e8ff49e66-kube-api-access-x86rz\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163627 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-horizon-secret-key\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163660 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/f0edd267-7b26-44cc-a576-552e8ff49e66-config-data\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163678 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0edd267-7b26-44cc-a576-552e8ff49e66-logs\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163697 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-horizon-tls-certs\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163715 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-combined-ca-bundle\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163741 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-logs\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163765 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0edd267-7b26-44cc-a576-552e8ff49e66-scripts\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163783 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-scripts\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163801 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-combined-ca-bundle\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.163832 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-secret-key\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.164327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-logs\") pod \"horizon-765875bb4b-tr7fm\" 
(UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.164888 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-config-data\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.164926 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-scripts\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.172724 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-secret-key\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.174448 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-tls-certs\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.178885 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-combined-ca-bundle\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.195209 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k597c\" (UniqueName: \"kubernetes.io/projected/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-kube-api-access-k597c\") pod \"horizon-765875bb4b-tr7fm\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265543 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-horizon-secret-key\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265624 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f0edd267-7b26-44cc-a576-552e8ff49e66-config-data\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265654 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0edd267-7b26-44cc-a576-552e8ff49e66-logs\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265680 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-horizon-tls-certs\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265697 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-combined-ca-bundle\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265745 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0edd267-7b26-44cc-a576-552e8ff49e66-scripts\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.265860 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x86rz\" (UniqueName: \"kubernetes.io/projected/f0edd267-7b26-44cc-a576-552e8ff49e66-kube-api-access-x86rz\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.266189 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0edd267-7b26-44cc-a576-552e8ff49e66-logs\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.267163 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f0edd267-7b26-44cc-a576-552e8ff49e66-config-data\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.268242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0edd267-7b26-44cc-a576-552e8ff49e66-scripts\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.269873 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-horizon-secret-key\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.269891 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-combined-ca-bundle\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.270114 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0edd267-7b26-44cc-a576-552e8ff49e66-horizon-tls-certs\") pod 
\"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.284713 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x86rz\" (UniqueName: \"kubernetes.io/projected/f0edd267-7b26-44cc-a576-552e8ff49e66-kube-api-access-x86rz\") pod \"horizon-58ffdb7978-lnv9j\" (UID: \"f0edd267-7b26-44cc-a576-552e8ff49e66\") " pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.316308 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.429364 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:31:45 crc kubenswrapper[4926]: I1125 18:31:45.650314 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 25 18:31:46 crc kubenswrapper[4926]: I1125 18:31:46.553450 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:31:46 crc kubenswrapper[4926]: I1125 18:31:46.624522 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f57d4bfc9-868kb"] Nov 25 18:31:46 crc kubenswrapper[4926]: I1125 18:31:46.624851 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" containerID="cri-o://7f73d395b5a9159ab90c796cbd17a5c78b3543a9e23f95a8ef138fd3f5754fc7" gracePeriod=10 Nov 25 18:31:47 crc kubenswrapper[4926]: I1125 18:31:47.236886 4926 generic.go:334] "Generic (PLEG): container finished" podID="2cf27ee2-c953-4788-8738-5359f186de3d" containerID="ae7f365180802b10bfc7603c1f7623792af2a90712e0641ca4103520eea1f998" exitCode=0 Nov 25 18:31:47 crc kubenswrapper[4926]: I1125 18:31:47.236933 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2cf27ee2-c953-4788-8738-5359f186de3d","Type":"ContainerDied","Data":"ae7f365180802b10bfc7603c1f7623792af2a90712e0641ca4103520eea1f998"} Nov 25 18:31:48 crc kubenswrapper[4926]: I1125 18:31:48.255884 4926 generic.go:334] "Generic (PLEG): container finished" podID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerID="7f73d395b5a9159ab90c796cbd17a5c78b3543a9e23f95a8ef138fd3f5754fc7" exitCode=0 Nov 25 18:31:48 crc kubenswrapper[4926]: I1125 18:31:48.256041 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" event={"ID":"4ca20e88-39d9-463a-8a59-909b495d8a8b","Type":"ContainerDied","Data":"7f73d395b5a9159ab90c796cbd17a5c78b3543a9e23f95a8ef138fd3f5754fc7"} Nov 25 18:31:48 crc kubenswrapper[4926]: I1125 18:31:48.436348 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: connect: connection refused" Nov 25 18:31:50 crc kubenswrapper[4926]: I1125 18:31:50.647218 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" 
podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 25 18:31:54 crc kubenswrapper[4926]: E1125 18:31:54.554149 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 25 18:31:54 crc kubenswrapper[4926]: E1125 18:31:54.555218 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 25 18:31:54 crc kubenswrapper[4926]: E1125 18:31:54.555864 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfh97h665hc5h7h65h55h55dh69h76hffh549hb5h549h65bh584h58hb9h647h5bhd8h66dh589h549h66dh557h57h68chb6h5d8h547h5cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-54l8z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-586d54b5d5-wchlr_openstack(a42599e0-4206-482d-8c8f-5d5e4a0e10ba): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:31:54 crc kubenswrapper[4926]: E1125 18:31:54.567759 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-586d54b5d5-wchlr" podUID="a42599e0-4206-482d-8c8f-5d5e4a0e10ba" Nov 25 18:31:56 crc kubenswrapper[4926]: I1125 
18:31:56.350394 4926 generic.go:334] "Generic (PLEG): container finished" podID="3305e621-41e4-4b90-b63d-72dceab97c95" containerID="90e3e242ecf190558022eeac560a84bddae81d66ba85e65e69044613f25474fd" exitCode=0 Nov 25 18:31:56 crc kubenswrapper[4926]: I1125 18:31:56.350471 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7z5gz" event={"ID":"3305e621-41e4-4b90-b63d-72dceab97c95","Type":"ContainerDied","Data":"90e3e242ecf190558022eeac560a84bddae81d66ba85e65e69044613f25474fd"} Nov 25 18:31:57 crc kubenswrapper[4926]: E1125 18:31:57.209465 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Nov 25 18:31:57 crc kubenswrapper[4926]: E1125 18:31:57.209899 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Nov 25 18:31:57 crc kubenswrapper[4926]: E1125 18:31:57.210060 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:38.102.83.27:5001/podified-master-centos10/openstack-placement-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n8d9t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-pvqdw_openstack(4addbd96-e765-4c9c-b260-5a80700849d2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:31:57 crc kubenswrapper[4926]: E1125 
18:31:57.211256 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-pvqdw" podUID="4addbd96-e765-4c9c-b260-5a80700849d2" Nov 25 18:31:57 crc kubenswrapper[4926]: E1125 18:31:57.362791 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-placement-api:watcher_latest\\\"\"" pod="openstack/placement-db-sync-pvqdw" podUID="4addbd96-e765-4c9c-b260-5a80700849d2" Nov 25 18:31:58 crc kubenswrapper[4926]: I1125 18:31:58.435724 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: i/o timeout" Nov 25 18:31:59 crc kubenswrapper[4926]: E1125 18:31:59.584289 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 25 18:31:59 crc kubenswrapper[4926]: E1125 18:31:59.584900 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 25 18:31:59 crc kubenswrapper[4926]: E1125 18:31:59.585168 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n674h99h77h59hbch99h577hcch9fh698h66h9hf5hb9h56h5b4h656hd7h584h8dhb9hcch5f5h695h66bh555h586h66dh67fh646h665h58bq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2tn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-65d76cdbdf-vktzr_openstack(906ba89b-3a3b-4823-9f86-b7c9664277cd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:31:59 crc kubenswrapper[4926]: E1125 18:31:59.587614 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-65d76cdbdf-vktzr" podUID="906ba89b-3a3b-4823-9f86-b7c9664277cd" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.662885 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.786714 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzq88\" (UniqueName: \"kubernetes.io/projected/4ca20e88-39d9-463a-8a59-909b495d8a8b-kube-api-access-wzq88\") pod \"4ca20e88-39d9-463a-8a59-909b495d8a8b\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.786829 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-nb\") pod \"4ca20e88-39d9-463a-8a59-909b495d8a8b\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.786964 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-swift-storage-0\") pod \"4ca20e88-39d9-463a-8a59-909b495d8a8b\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.787001 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-sb\") pod \"4ca20e88-39d9-463a-8a59-909b495d8a8b\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.787931 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-config\") pod \"4ca20e88-39d9-463a-8a59-909b495d8a8b\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.787959 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-svc\") pod \"4ca20e88-39d9-463a-8a59-909b495d8a8b\" (UID: \"4ca20e88-39d9-463a-8a59-909b495d8a8b\") " Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.805984 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ca20e88-39d9-463a-8a59-909b495d8a8b-kube-api-access-wzq88" (OuterVolumeSpecName: "kube-api-access-wzq88") pod "4ca20e88-39d9-463a-8a59-909b495d8a8b" (UID: "4ca20e88-39d9-463a-8a59-909b495d8a8b"). InnerVolumeSpecName "kube-api-access-wzq88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.840154 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4ca20e88-39d9-463a-8a59-909b495d8a8b" (UID: "4ca20e88-39d9-463a-8a59-909b495d8a8b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.843972 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4ca20e88-39d9-463a-8a59-909b495d8a8b" (UID: "4ca20e88-39d9-463a-8a59-909b495d8a8b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.848784 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4ca20e88-39d9-463a-8a59-909b495d8a8b" (UID: "4ca20e88-39d9-463a-8a59-909b495d8a8b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.868679 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4ca20e88-39d9-463a-8a59-909b495d8a8b" (UID: "4ca20e88-39d9-463a-8a59-909b495d8a8b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.874918 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-config" (OuterVolumeSpecName: "config") pod "4ca20e88-39d9-463a-8a59-909b495d8a8b" (UID: "4ca20e88-39d9-463a-8a59-909b495d8a8b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.890561 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzq88\" (UniqueName: \"kubernetes.io/projected/4ca20e88-39d9-463a-8a59-909b495d8a8b-kube-api-access-wzq88\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.890605 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.890615 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.890669 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.890680 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:31:59 crc kubenswrapper[4926]: I1125 18:31:59.890688 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4ca20e88-39d9-463a-8a59-909b495d8a8b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:00 crc kubenswrapper[4926]: I1125 18:32:00.404525 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" event={"ID":"4ca20e88-39d9-463a-8a59-909b495d8a8b","Type":"ContainerDied","Data":"5395fb3717ba8c6918cee8fd87db92fa3dc6e173cd7f0db0f486d7e8eb31fef2"} Nov 25 18:32:00 crc kubenswrapper[4926]: I1125 18:32:00.404661 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" Nov 25 18:32:00 crc kubenswrapper[4926]: I1125 18:32:00.405010 4926 scope.go:117] "RemoveContainer" containerID="7f73d395b5a9159ab90c796cbd17a5c78b3543a9e23f95a8ef138fd3f5754fc7" Nov 25 18:32:00 crc kubenswrapper[4926]: I1125 18:32:00.462452 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f57d4bfc9-868kb"] Nov 25 18:32:00 crc kubenswrapper[4926]: I1125 18:32:00.473087 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f57d4bfc9-868kb"] Nov 25 18:32:00 crc kubenswrapper[4926]: I1125 18:32:00.647904 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 18:32:02 crc kubenswrapper[4926]: I1125 18:32:02.342439 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" path="/var/lib/kubelet/pods/4ca20e88-39d9-463a-8a59-909b495d8a8b/volumes" Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.436160 4926 generic.go:334] "Generic (PLEG): container finished" podID="e56a5d23-9046-44fb-b484-19e044ee5ab7" containerID="edb0ac343c5bfb9b8d22be56c5801ed4c081200df785481f097931f0dd999f3c" exitCode=0 Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.436270 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wvbc9" event={"ID":"e56a5d23-9046-44fb-b484-19e044ee5ab7","Type":"ContainerDied","Data":"edb0ac343c5bfb9b8d22be56c5801ed4c081200df785481f097931f0dd999f3c"} Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.436390 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6f57d4bfc9-868kb" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: i/o timeout" Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.542017 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.542116 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.542412 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.543213 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"07a87485769ab3c18f3d0de8b8428276c4f53380d423cecc3238c93bfce01c6d"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:32:03 crc kubenswrapper[4926]: I1125 18:32:03.543310 4926 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://07a87485769ab3c18f3d0de8b8428276c4f53380d423cecc3238c93bfce01c6d" gracePeriod=600 Nov 25 18:32:04 crc kubenswrapper[4926]: I1125 18:32:04.451746 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="07a87485769ab3c18f3d0de8b8428276c4f53380d423cecc3238c93bfce01c6d" exitCode=0 Nov 25 18:32:04 crc kubenswrapper[4926]: I1125 18:32:04.451842 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"07a87485769ab3c18f3d0de8b8428276c4f53380d423cecc3238c93bfce01c6d"} Nov 25 18:32:05 crc kubenswrapper[4926]: I1125 18:32:05.648159 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.906863 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.912946 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958447 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-combined-ca-bundle\") pod \"2cf27ee2-c953-4788-8738-5359f186de3d\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958508 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-config-data\") pod \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958712 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54l8z\" (UniqueName: \"kubernetes.io/projected/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-kube-api-access-54l8z\") pod \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958758 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4djfn\" (UniqueName: \"kubernetes.io/projected/2cf27ee2-c953-4788-8738-5359f186de3d-kube-api-access-4djfn\") pod \"2cf27ee2-c953-4788-8738-5359f186de3d\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958879 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cf27ee2-c953-4788-8738-5359f186de3d-logs\") pod \"2cf27ee2-c953-4788-8738-5359f186de3d\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958934 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-config-data\") pod \"2cf27ee2-c953-4788-8738-5359f186de3d\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.958970 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-custom-prometheus-ca\") pod \"2cf27ee2-c953-4788-8738-5359f186de3d\" (UID: \"2cf27ee2-c953-4788-8738-5359f186de3d\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.959049 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-logs\") pod \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.959123 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-scripts\") pod \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.959196 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-horizon-secret-key\") pod \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\" (UID: \"a42599e0-4206-482d-8c8f-5d5e4a0e10ba\") " Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.960252 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-config-data" (OuterVolumeSpecName: "config-data") pod "a42599e0-4206-482d-8c8f-5d5e4a0e10ba" (UID: "a42599e0-4206-482d-8c8f-5d5e4a0e10ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.963649 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cf27ee2-c953-4788-8738-5359f186de3d-logs" (OuterVolumeSpecName: "logs") pod "2cf27ee2-c953-4788-8738-5359f186de3d" (UID: "2cf27ee2-c953-4788-8738-5359f186de3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.964395 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-scripts" (OuterVolumeSpecName: "scripts") pod "a42599e0-4206-482d-8c8f-5d5e4a0e10ba" (UID: "a42599e0-4206-482d-8c8f-5d5e4a0e10ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.965584 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-logs" (OuterVolumeSpecName: "logs") pod "a42599e0-4206-482d-8c8f-5d5e4a0e10ba" (UID: "a42599e0-4206-482d-8c8f-5d5e4a0e10ba"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.966821 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a42599e0-4206-482d-8c8f-5d5e4a0e10ba" (UID: "a42599e0-4206-482d-8c8f-5d5e4a0e10ba"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.967164 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cf27ee2-c953-4788-8738-5359f186de3d-kube-api-access-4djfn" (OuterVolumeSpecName: "kube-api-access-4djfn") pod "2cf27ee2-c953-4788-8738-5359f186de3d" (UID: "2cf27ee2-c953-4788-8738-5359f186de3d"). InnerVolumeSpecName "kube-api-access-4djfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.976993 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-kube-api-access-54l8z" (OuterVolumeSpecName: "kube-api-access-54l8z") pod "a42599e0-4206-482d-8c8f-5d5e4a0e10ba" (UID: "a42599e0-4206-482d-8c8f-5d5e4a0e10ba"). InnerVolumeSpecName "kube-api-access-54l8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:07 crc kubenswrapper[4926]: I1125 18:32:07.998215 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "2cf27ee2-c953-4788-8738-5359f186de3d" (UID: "2cf27ee2-c953-4788-8738-5359f186de3d"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.002798 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2cf27ee2-c953-4788-8738-5359f186de3d" (UID: "2cf27ee2-c953-4788-8738-5359f186de3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.034796 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-config-data" (OuterVolumeSpecName: "config-data") pod "2cf27ee2-c953-4788-8738-5359f186de3d" (UID: "2cf27ee2-c953-4788-8738-5359f186de3d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062193 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062237 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062252 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062264 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062276 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54l8z\" (UniqueName: \"kubernetes.io/projected/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-kube-api-access-54l8z\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062289 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4djfn\" (UniqueName: \"kubernetes.io/projected/2cf27ee2-c953-4788-8738-5359f186de3d-kube-api-access-4djfn\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062299 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cf27ee2-c953-4788-8738-5359f186de3d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062311 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062322 4926 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/2cf27ee2-c953-4788-8738-5359f186de3d-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.062334 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a42599e0-4206-482d-8c8f-5d5e4a0e10ba-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.326622 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.328355 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.328599 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.328794 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:38.102.83.27:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n9ch5dchf4h96h597h659h5d4h578h5ch59fh59dh76h5d7h647h644h545h5ddh5d5h649h558h55fh555h646h5cch7ch55fh5d6h568h5d8h88h687h5bq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9gw92,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(102b4780-5da7-4b86-9679-e87417b4ee5a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.438849 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtcbh\" (UniqueName: 
\"kubernetes.io/projected/3305e621-41e4-4b90-b63d-72dceab97c95-kube-api-access-vtcbh\") pod \"3305e621-41e4-4b90-b63d-72dceab97c95\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.439062 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-scripts\") pod \"3305e621-41e4-4b90-b63d-72dceab97c95\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.439144 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-config-data\") pod \"3305e621-41e4-4b90-b63d-72dceab97c95\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.439400 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-fernet-keys\") pod \"3305e621-41e4-4b90-b63d-72dceab97c95\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.439600 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-combined-ca-bundle\") pod \"3305e621-41e4-4b90-b63d-72dceab97c95\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.439735 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-credential-keys\") pod \"3305e621-41e4-4b90-b63d-72dceab97c95\" (UID: \"3305e621-41e4-4b90-b63d-72dceab97c95\") " Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.457676 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3305e621-41e4-4b90-b63d-72dceab97c95" (UID: "3305e621-41e4-4b90-b63d-72dceab97c95"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.463384 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-scripts" (OuterVolumeSpecName: "scripts") pod "3305e621-41e4-4b90-b63d-72dceab97c95" (UID: "3305e621-41e4-4b90-b63d-72dceab97c95"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.470689 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3305e621-41e4-4b90-b63d-72dceab97c95" (UID: "3305e621-41e4-4b90-b63d-72dceab97c95"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.494842 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3305e621-41e4-4b90-b63d-72dceab97c95-kube-api-access-vtcbh" (OuterVolumeSpecName: "kube-api-access-vtcbh") pod "3305e621-41e4-4b90-b63d-72dceab97c95" (UID: "3305e621-41e4-4b90-b63d-72dceab97c95"). InnerVolumeSpecName "kube-api-access-vtcbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.497638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"2cf27ee2-c953-4788-8738-5359f186de3d","Type":"ContainerDied","Data":"b29328dc7e142b9d789f40dec47baeb25e5e7b6069b8446586e90c6dcbd7f1aa"} Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.497791 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.519408 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7z5gz" event={"ID":"3305e621-41e4-4b90-b63d-72dceab97c95","Type":"ContainerDied","Data":"d79cfcd0c21c9c060df668353ce36c65d533e4dd27a0859191931bd47e44f11a"} Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.519464 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d79cfcd0c21c9c060df668353ce36c65d533e4dd27a0859191931bd47e44f11a" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.519551 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7z5gz" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.520519 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3305e621-41e4-4b90-b63d-72dceab97c95" (UID: "3305e621-41e4-4b90-b63d-72dceab97c95"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.526007 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-config-data" (OuterVolumeSpecName: "config-data") pod "3305e621-41e4-4b90-b63d-72dceab97c95" (UID: "3305e621-41e4-4b90-b63d-72dceab97c95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.542717 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.542802 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.545810 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-586d54b5d5-wchlr" event={"ID":"a42599e0-4206-482d-8c8f-5d5e4a0e10ba","Type":"ContainerDied","Data":"cee5a93502e0613148981c72e6fe2b7fdc57785b90b01b8985d0ba915b5e5a4d"} Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.545930 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-586d54b5d5-wchlr" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.554213 4926 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.554239 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtcbh\" (UniqueName: \"kubernetes.io/projected/3305e621-41e4-4b90-b63d-72dceab97c95-kube-api-access-vtcbh\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.554249 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.554260 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.554270 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.554280 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305e621-41e4-4b90-b63d-72dceab97c95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.558874 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.559522 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.559567 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.559586 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api-log" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.559593 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api-log" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.559602 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="init" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.559633 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="init" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.559654 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.559660 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.559834 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3305e621-41e4-4b90-b63d-72dceab97c95" 
containerName="keystone-bootstrap" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.559845 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3305e621-41e4-4b90-b63d-72dceab97c95" containerName="keystone-bootstrap" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.560150 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ca20e88-39d9-463a-8a59-909b495d8a8b" containerName="dnsmasq-dns" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.560167 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.560209 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3305e621-41e4-4b90-b63d-72dceab97c95" containerName="keystone-bootstrap" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.560222 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api-log" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.561634 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.573666 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.575543 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.605243 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-586d54b5d5-wchlr"] Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.615168 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-586d54b5d5-wchlr"] Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.657269 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgfmv\" (UniqueName: \"kubernetes.io/projected/b757a4b3-7f06-4254-b5ca-86f0b0d05234-kube-api-access-pgfmv\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.657434 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-config-data\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.657461 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.657519 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.657547 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/b757a4b3-7f06-4254-b5ca-86f0b0d05234-logs\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.759738 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-config-data\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.759791 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.759842 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.759867 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b757a4b3-7f06-4254-b5ca-86f0b0d05234-logs\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.759952 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgfmv\" (UniqueName: \"kubernetes.io/projected/b757a4b3-7f06-4254-b5ca-86f0b0d05234-kube-api-access-pgfmv\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.763108 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b757a4b3-7f06-4254-b5ca-86f0b0d05234-logs\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.764970 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.766121 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.767612 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-config-data\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.778135 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgfmv\" (UniqueName: 
\"kubernetes.io/projected/b757a4b3-7f06-4254-b5ca-86f0b0d05234-kube-api-access-pgfmv\") pod \"watcher-api-0\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: I1125 18:32:08.890520 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.961962 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-barbican-api:watcher_latest" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.962057 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-barbican-api:watcher_latest" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.962171 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:38.102.83.27:5001/podified-master-centos10/openstack-barbican-api:watcher_latest,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sr9c7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-x8d9g_openstack(017caf97-9f18-49b8-b6e6-597c709e3420): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 18:32:08 crc kubenswrapper[4926]: E1125 18:32:08.963800 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-x8d9g" podUID="017caf97-9f18-49b8-b6e6-597c709e3420" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.067218 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.076131 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wvbc9" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.167771 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48rgt\" (UniqueName: \"kubernetes.io/projected/e56a5d23-9046-44fb-b484-19e044ee5ab7-kube-api-access-48rgt\") pod \"e56a5d23-9046-44fb-b484-19e044ee5ab7\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.167906 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-config-data\") pod \"906ba89b-3a3b-4823-9f86-b7c9664277cd\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.167946 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/906ba89b-3a3b-4823-9f86-b7c9664277cd-horizon-secret-key\") pod \"906ba89b-3a3b-4823-9f86-b7c9664277cd\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.167966 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-scripts\") pod \"906ba89b-3a3b-4823-9f86-b7c9664277cd\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.168011 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-db-sync-config-data\") pod \"e56a5d23-9046-44fb-b484-19e044ee5ab7\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.168092 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906ba89b-3a3b-4823-9f86-b7c9664277cd-logs\") pod \"906ba89b-3a3b-4823-9f86-b7c9664277cd\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.168121 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf2tn\" (UniqueName: \"kubernetes.io/projected/906ba89b-3a3b-4823-9f86-b7c9664277cd-kube-api-access-gf2tn\") pod \"906ba89b-3a3b-4823-9f86-b7c9664277cd\" (UID: \"906ba89b-3a3b-4823-9f86-b7c9664277cd\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.168197 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-combined-ca-bundle\") pod \"e56a5d23-9046-44fb-b484-19e044ee5ab7\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.168332 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-config-data\") pod \"e56a5d23-9046-44fb-b484-19e044ee5ab7\" (UID: \"e56a5d23-9046-44fb-b484-19e044ee5ab7\") " Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.168790 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-config-data" (OuterVolumeSpecName: "config-data") pod "906ba89b-3a3b-4823-9f86-b7c9664277cd" (UID: 
"906ba89b-3a3b-4823-9f86-b7c9664277cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.170510 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-scripts" (OuterVolumeSpecName: "scripts") pod "906ba89b-3a3b-4823-9f86-b7c9664277cd" (UID: "906ba89b-3a3b-4823-9f86-b7c9664277cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.170780 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/906ba89b-3a3b-4823-9f86-b7c9664277cd-logs" (OuterVolumeSpecName: "logs") pod "906ba89b-3a3b-4823-9f86-b7c9664277cd" (UID: "906ba89b-3a3b-4823-9f86-b7c9664277cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.172960 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e56a5d23-9046-44fb-b484-19e044ee5ab7" (UID: "e56a5d23-9046-44fb-b484-19e044ee5ab7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.174225 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/906ba89b-3a3b-4823-9f86-b7c9664277cd-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "906ba89b-3a3b-4823-9f86-b7c9664277cd" (UID: "906ba89b-3a3b-4823-9f86-b7c9664277cd"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.176420 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e56a5d23-9046-44fb-b484-19e044ee5ab7-kube-api-access-48rgt" (OuterVolumeSpecName: "kube-api-access-48rgt") pod "e56a5d23-9046-44fb-b484-19e044ee5ab7" (UID: "e56a5d23-9046-44fb-b484-19e044ee5ab7"). InnerVolumeSpecName "kube-api-access-48rgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.177196 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/906ba89b-3a3b-4823-9f86-b7c9664277cd-kube-api-access-gf2tn" (OuterVolumeSpecName: "kube-api-access-gf2tn") pod "906ba89b-3a3b-4823-9f86-b7c9664277cd" (UID: "906ba89b-3a3b-4823-9f86-b7c9664277cd"). InnerVolumeSpecName "kube-api-access-gf2tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.217362 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e56a5d23-9046-44fb-b484-19e044ee5ab7" (UID: "e56a5d23-9046-44fb-b484-19e044ee5ab7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.242249 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-config-data" (OuterVolumeSpecName: "config-data") pod "e56a5d23-9046-44fb-b484-19e044ee5ab7" (UID: "e56a5d23-9046-44fb-b484-19e044ee5ab7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270302 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270347 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/906ba89b-3a3b-4823-9f86-b7c9664277cd-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270362 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/906ba89b-3a3b-4823-9f86-b7c9664277cd-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270394 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270406 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906ba89b-3a3b-4823-9f86-b7c9664277cd-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270418 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf2tn\" (UniqueName: \"kubernetes.io/projected/906ba89b-3a3b-4823-9f86-b7c9664277cd-kube-api-access-gf2tn\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270429 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270443 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56a5d23-9046-44fb-b484-19e044ee5ab7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.270455 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48rgt\" (UniqueName: \"kubernetes.io/projected/e56a5d23-9046-44fb-b484-19e044ee5ab7-kube-api-access-48rgt\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.411439 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7z5gz"] Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.419075 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7z5gz"] Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.550223 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-52ncr"] Nov 25 18:32:09 crc kubenswrapper[4926]: E1125 18:32:09.550654 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e56a5d23-9046-44fb-b484-19e044ee5ab7" 
containerName="glance-db-sync" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.550670 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e56a5d23-9046-44fb-b484-19e044ee5ab7" containerName="glance-db-sync" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.550855 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e56a5d23-9046-44fb-b484-19e044ee5ab7" containerName="glance-db-sync" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.551560 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.555258 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2fxt6" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.555605 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.556326 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.556788 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.556976 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.561285 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65d76cdbdf-vktzr" event={"ID":"906ba89b-3a3b-4823-9f86-b7c9664277cd","Type":"ContainerDied","Data":"c3414176a5cb1e06b2cb1144753c8f4c31692cbce99231da36db67872a2c0ee8"} Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.561332 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65d76cdbdf-vktzr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.563784 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-52ncr"] Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.582007 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wvbc9" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.582515 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wvbc9" event={"ID":"e56a5d23-9046-44fb-b484-19e044ee5ab7","Type":"ContainerDied","Data":"a31b7d2eb8d8bdc000229bdcbb6b93708eff83a4af68852b7393403804b87216"} Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.582575 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a31b7d2eb8d8bdc000229bdcbb6b93708eff83a4af68852b7393403804b87216" Nov 25 18:32:09 crc kubenswrapper[4926]: E1125 18:32:09.584606 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-barbican-api:watcher_latest\\\"\"" pod="openstack/barbican-db-sync-x8d9g" podUID="017caf97-9f18-49b8-b6e6-597c709e3420" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.607027 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-combined-ca-bundle\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.607283 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmscx\" (UniqueName: \"kubernetes.io/projected/f2205cef-0292-4e62-b282-0acd7f50f920-kube-api-access-hmscx\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.607452 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-scripts\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.607552 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-credential-keys\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.607585 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-fernet-keys\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.607665 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-config-data\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.645411 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-65d76cdbdf-vktzr"] Nov 25 18:32:09 crc 
kubenswrapper[4926]: I1125 18:32:09.659740 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-65d76cdbdf-vktzr"] Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.708853 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-credential-keys\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.708900 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-fernet-keys\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.708925 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-config-data\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.708978 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-combined-ca-bundle\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.709032 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmscx\" (UniqueName: \"kubernetes.io/projected/f2205cef-0292-4e62-b282-0acd7f50f920-kube-api-access-hmscx\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.709065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-scripts\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.713768 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-scripts\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.714158 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-fernet-keys\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.720435 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-config-data\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 
18:32:09.721885 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-credential-keys\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.721898 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-combined-ca-bundle\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.724631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmscx\" (UniqueName: \"kubernetes.io/projected/f2205cef-0292-4e62-b282-0acd7f50f920-kube-api-access-hmscx\") pod \"keystone-bootstrap-52ncr\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:09 crc kubenswrapper[4926]: I1125 18:32:09.879421 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.344955 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" path="/var/lib/kubelet/pods/2cf27ee2-c953-4788-8738-5359f186de3d/volumes" Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.345920 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3305e621-41e4-4b90-b63d-72dceab97c95" path="/var/lib/kubelet/pods/3305e621-41e4-4b90-b63d-72dceab97c95/volumes" Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.346827 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="906ba89b-3a3b-4823-9f86-b7c9664277cd" path="/var/lib/kubelet/pods/906ba89b-3a3b-4823-9f86-b7c9664277cd/volumes" Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.351034 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a42599e0-4206-482d-8c8f-5d5e4a0e10ba" path="/var/lib/kubelet/pods/a42599e0-4206-482d-8c8f-5d5e4a0e10ba/volumes" Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.374715 4926 scope.go:117] "RemoveContainer" containerID="78b61847467f43cbcd7b952943154cba4e03f7604006089c66baa873c4571338" Nov 25 18:32:10 crc kubenswrapper[4926]: E1125 18:32:10.500617 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Nov 25 18:32:10 crc kubenswrapper[4926]: E1125 18:32:10.501080 4926 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.27:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Nov 25 18:32:10 crc kubenswrapper[4926]: E1125 18:32:10.501215 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:38.102.83.27:5001/podified-master-centos10/openstack-cinder-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lpc52,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-jnbt6_openstack(0138f286-e018-42de-b145-2cda09144394): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 18:32:10 crc kubenswrapper[4926]: E1125 18:32:10.502720 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-jnbt6" podUID="0138f286-e018-42de-b145-2cda09144394"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.572486 4926 scope.go:117] "RemoveContainer" containerID="2a18d78481ed56ee9bbb8c78eb19b76e596d9f89645390e0fbbcd5362fea71a4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.573834 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c57575f65-q94t4"]
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.578703 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.639285 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c57575f65-q94t4"]
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.649303 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="2cf27ee2-c953-4788-8738-5359f186de3d" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.735124 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-nb\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.735644 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b794l\" (UniqueName: \"kubernetes.io/projected/673cc698-685c-4cfa-b777-3ea0b418ec78-kube-api-access-b794l\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.735674 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-svc\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.735900 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-sb\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.735927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-config\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.735956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-swift-storage-0\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: E1125 18:32:10.759363 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.27:5001/podified-master-centos10/openstack-cinder-api:watcher_latest\\\"\"" pod="openstack/cinder-db-sync-jnbt6" podUID="0138f286-e018-42de-b145-2cda09144394"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.843341 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-swift-storage-0\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.843519 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-nb\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.843638 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b794l\" (UniqueName: \"kubernetes.io/projected/673cc698-685c-4cfa-b777-3ea0b418ec78-kube-api-access-b794l\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.843678 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-svc\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.843706 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-sb\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.843743 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-config\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.845108 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-config\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.845362 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-svc\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.845511 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-nb\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.846015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-sb\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.846104 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-swift-storage-0\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.885205 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b794l\" (UniqueName: \"kubernetes.io/projected/673cc698-685c-4cfa-b777-3ea0b418ec78-kube-api-access-b794l\") pod \"dnsmasq-dns-6c57575f65-q94t4\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:10 crc kubenswrapper[4926]: I1125 18:32:10.932835 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.266882 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-765875bb4b-tr7fm"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.406076 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58ffdb7978-lnv9j"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.448288 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-52ncr"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.495346 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.589542 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.591135 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.596760 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.597073 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.597179 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mzhrc"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.621076 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.697595 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.699167 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.701830 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.717783 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.773182 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-scripts\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.773434 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.773636 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-logs\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.773722 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlh2z\" (UniqueName: \"kubernetes.io/projected/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-kube-api-access-mlh2z\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.773910 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-config-data\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.773988 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.774019 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.879864 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.879972 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880024 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-scripts\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880094 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880257 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-logs\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880336 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880418 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880493 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cfwx\" (UniqueName: \"kubernetes.io/projected/bef25546-2579-4486-aa95-5dcd3a72c1f8-kube-api-access-9cfwx\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880531 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-logs\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880590 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlh2z\" (UniqueName: \"kubernetes.io/projected/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-kube-api-access-mlh2z\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880678 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-config-data\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880729 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880767 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.880841 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.882045 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-logs\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.882112 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.892877 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-scripts\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.896643 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.914327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-config-data\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.916003 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlh2z\" (UniqueName: \"kubernetes.io/projected/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-kube-api-access-mlh2z\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.940299 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") " pod="openstack/glance-default-external-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983581 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983638 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983656 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983712 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983727 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-logs\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983748 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.983781 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cfwx\" (UniqueName: \"kubernetes.io/projected/bef25546-2579-4486-aa95-5dcd3a72c1f8-kube-api-access-9cfwx\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.984016 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.984330 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-logs\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.984680 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.988494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.988969 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:11 crc kubenswrapper[4926]: I1125 18:32:11.989741 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:12 crc kubenswrapper[4926]: I1125 18:32:12.001621 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cfwx\" (UniqueName: \"kubernetes.io/projected/bef25546-2579-4486-aa95-5dcd3a72c1f8-kube-api-access-9cfwx\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:12 crc kubenswrapper[4926]: I1125 18:32:12.008612 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") " pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:12 crc kubenswrapper[4926]: I1125 18:32:12.037761 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:12 crc kubenswrapper[4926]: I1125 18:32:12.220528 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.498663 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 18:32:13 crc kubenswrapper[4926]: W1125 18:32:13.582983 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb757a4b3_7f06_4254_b5ca_86f0b0d05234.slice/crio-011c7379c76f8e6b8f6eae7c49453992a73c4f6987df0da8aad7252787686e62 WatchSource:0}: Error finding container 011c7379c76f8e6b8f6eae7c49453992a73c4f6987df0da8aad7252787686e62: Status 404 returned error can't find the container with id 011c7379c76f8e6b8f6eae7c49453992a73c4f6987df0da8aad7252787686e62
Nov 25 18:32:13 crc kubenswrapper[4926]: W1125 18:32:13.591542 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2205cef_0292_4e62_b282_0acd7f50f920.slice/crio-896708d999cada4601c257280a0b02d31a51faf55b707f530c2b4f8582054471 WatchSource:0}: Error finding container 896708d999cada4601c257280a0b02d31a51faf55b707f530c2b4f8582054471: Status 404 returned error can't find the container with id 896708d999cada4601c257280a0b02d31a51faf55b707f530c2b4f8582054471
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.613495 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.666355 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58ffdb7978-lnv9j" event={"ID":"f0edd267-7b26-44cc-a576-552e8ff49e66","Type":"ContainerStarted","Data":"aa3e9a8aedf5f421716a1444b0f5ac869f53a47d3e2a08a6e2459b6b1bf73f9e"}
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.667959 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-765875bb4b-tr7fm" event={"ID":"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55","Type":"ContainerStarted","Data":"6d9953637e4afd041f4ba7cd4a9c0596713e3ad4ae424d5475ddf15b63765128"}
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.668949 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b757a4b3-7f06-4254-b5ca-86f0b0d05234","Type":"ContainerStarted","Data":"011c7379c76f8e6b8f6eae7c49453992a73c4f6987df0da8aad7252787686e62"}
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.670286 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-52ncr" event={"ID":"f2205cef-0292-4e62-b282-0acd7f50f920","Type":"ContainerStarted","Data":"896708d999cada4601c257280a0b02d31a51faf55b707f530c2b4f8582054471"}
Nov 25 18:32:13 crc kubenswrapper[4926]: I1125 18:32:13.816759 4926 scope.go:117] "RemoveContainer" containerID="ae7f365180802b10bfc7603c1f7623792af2a90712e0641ca4103520eea1f998"
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.000816 4926 scope.go:117] "RemoveContainer" containerID="00b7aea7b91cbbd776bb314783a455b9a5d1db3bfe52f1cadde1d772c2707351"
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.465065 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c57575f65-q94t4"]
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.678196 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.747761 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"a8f56314785fa968a3a105a23d5d2b50a67b5ca02a86eb00cd6083866de84208"}
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.781503 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9","Type":"ContainerStarted","Data":"f6e0cd719af7e50e84bd95403eb081833b04d346a00110ba117574648b791d8c"}
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.801066 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerStarted","Data":"470e3756a0af50afe7d5c93b51683a49c9eecdbd66fa37c5c6cc96a5a8c73033"}
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.859996 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.863215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pvqdw" event={"ID":"4addbd96-e765-4c9c-b260-5a80700849d2","Type":"ContainerStarted","Data":"ceac441c09b1ab45b04be4d908d2fa69eb4a3016e0432b031f87cc78b7d3ce19"}
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.869844 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-765875bb4b-tr7fm" event={"ID":"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55","Type":"ContainerStarted","Data":"1cf829482d4e11d816d3102c1a6f63cdade69e841f157c7d4541ecf4b0799142"}
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.893840 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=8.253808877 podStartE2EDuration="39.893818523s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="2025-11-25 18:31:36.667355752 +0000 UTC m=+1127.052869357" lastFinishedPulling="2025-11-25 18:32:08.307365398 +0000 UTC m=+1158.692879003" observedRunningTime="2025-11-25 18:32:14.818441748 +0000 UTC m=+1165.203955353" watchObservedRunningTime="2025-11-25 18:32:14.893818523 +0000 UTC m=+1165.279332128"
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.900617 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-pvqdw" podStartSLOduration=3.690057129 podStartE2EDuration="39.900597994s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="2025-11-25 18:31:37.412653896 +0000 UTC m=+1127.798167501" lastFinishedPulling="2025-11-25 18:32:13.623194761 +0000 UTC m=+1164.008708366" observedRunningTime="2025-11-25 18:32:14.881017237 +0000 UTC m=+1165.266530842" watchObservedRunningTime="2025-11-25 18:32:14.900597994 +0000 UTC m=+1165.286111599"
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.915395 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b757a4b3-7f06-4254-b5ca-86f0b0d05234","Type":"ContainerStarted","Data":"007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6"}
Nov 25 18:32:14 crc kubenswrapper[4926]: I1125 18:32:14.960214 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"67831a6a-38bf-4f8c-98a8-c8cb0274e218","Type":"ContainerStarted","Data":"3caad3e7c74cab7798c34f391f1cd40df720d56b8e0eca2a300db717507dd858"}
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.005197 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" event={"ID":"673cc698-685c-4cfa-b777-3ea0b418ec78","Type":"ContainerStarted","Data":"60a5d6d12ff36af0bffdbbe5322c8602d0a81955990ca5420f7242dca9c27ed0"}
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.022503 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd689864c-5qb7v" event={"ID":"166aef77-b73a-497a-886d-a66a548bff2d","Type":"ContainerStarted","Data":"a63627d9165dbcc99c0946b1b85b68d0edd0b9c9870131543642a2ed93d7f691"}
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.041282 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-52ncr" event={"ID":"f2205cef-0292-4e62-b282-0acd7f50f920","Type":"ContainerStarted","Data":"3f0799115ea5976d342803d3ec287b7e11216070aa59088280c0f0799da32aef"}
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.057557 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58ffdb7978-lnv9j" event={"ID":"f0edd267-7b26-44cc-a576-552e8ff49e66","Type":"ContainerStarted","Data":"8b98ee28953ee7185fc8d83aa1d45618e7a50a17cbe5d0f7b209b7c418fff28e"}
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.076170 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=8.444818489 podStartE2EDuration="40.076144415s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="2025-11-25 18:31:37.33050012 +0000 UTC m=+1127.716013725" lastFinishedPulling="2025-11-25 18:32:08.961826036 +0000 UTC m=+1159.347339651" observedRunningTime="2025-11-25 18:32:14.990214471 +0000 UTC m=+1165.375728076" watchObservedRunningTime="2025-11-25 18:32:15.076144415 +0000 UTC m=+1165.461658010"
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.578689 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.611583 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.644790 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-52ncr" podStartSLOduration=6.644767661 podStartE2EDuration="6.644767661s" podCreationTimestamp="2025-11-25 18:32:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:15.07478144 +0000 UTC m=+1165.460295065" watchObservedRunningTime="2025-11-25 18:32:15.644767661 +0000 UTC m=+1166.030281266"
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.982092 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Nov 25 18:32:15 crc kubenswrapper[4926]: I1125 18:32:15.982144 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.073244 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b","Type":"ContainerStarted","Data":"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.073296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b","Type":"ContainerStarted","Data":"80280509b395057a299306f1210b8447cd19fb068e0ace79b3930315d9acf1a8"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.087900 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58ffdb7978-lnv9j" event={"ID":"f0edd267-7b26-44cc-a576-552e8ff49e66","Type":"ContainerStarted","Data":"37920c999f0409449ccf10a910efd483e3b3435103f587715a461c3f65c316b3"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.098413 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.099721 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-765875bb4b-tr7fm" event={"ID":"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55","Type":"ContainerStarted","Data":"852a73a980fb903c7c9cee66c146d5af7a6a6f5e72cce623c7519f6abb47b4ff"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.103968 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b757a4b3-7f06-4254-b5ca-86f0b0d05234","Type":"ContainerStarted","Data":"7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.104788 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.118940 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bef25546-2579-4486-aa95-5dcd3a72c1f8","Type":"ContainerStarted","Data":"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.118994 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bef25546-2579-4486-aa95-5dcd3a72c1f8","Type":"ContainerStarted","Data":"ef27fda2bf71c391436771a5e27fdd6fae1a7cf18114067cd6e64de409294f4a"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.128937 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-58ffdb7978-lnv9j" podStartSLOduration=31.12891491 podStartE2EDuration="31.12891491s" podCreationTimestamp="2025-11-25 18:31:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:16.109826046 +0000 UTC m=+1166.495339651" watchObservedRunningTime="2025-11-25 18:32:16.12891491 +0000 UTC m=+1166.514428515"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.139233 4926 generic.go:334] "Generic (PLEG): container finished" podID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerID="bbab1e4f1fc9f6ce89ff733d282ac02e76078f7ef3cf555187ccd93a43022f92" exitCode=0
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.139347 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" event={"ID":"673cc698-685c-4cfa-b777-3ea0b418ec78","Type":"ContainerDied","Data":"bbab1e4f1fc9f6ce89ff733d282ac02e76078f7ef3cf555187ccd93a43022f92"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.161639 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=8.161615101 podStartE2EDuration="8.161615101s" podCreationTimestamp="2025-11-25 18:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:16.147186015 +0000 UTC m=+1166.532699620" watchObservedRunningTime="2025-11-25 18:32:16.161615101 +0000 UTC m=+1166.547128706"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.170888 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd689864c-5qb7v" event={"ID":"166aef77-b73a-497a-886d-a66a548bff2d","Type":"ContainerStarted","Data":"ffe20d0efc9a99274bf3de7e22f73e790bad18f6d2bf38dcedd11ca992f5dd59"}
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.170924 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7fd689864c-5qb7v" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon-log" containerID="cri-o://a63627d9165dbcc99c0946b1b85b68d0edd0b9c9870131543642a2ed93d7f691" gracePeriod=30
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.171334 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.171426 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7fd689864c-5qb7v" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon" containerID="cri-o://ffe20d0efc9a99274bf3de7e22f73e790bad18f6d2bf38dcedd11ca992f5dd59" gracePeriod=30
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.189482 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-765875bb4b-tr7fm" podStartSLOduration=32.189463899 podStartE2EDuration="32.189463899s" podCreationTimestamp="2025-11-25 18:31:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:16.180140892 +0000 UTC m=+1166.565654497" watchObservedRunningTime="2025-11-25 18:32:16.189463899 +0000 UTC m=+1166.574977504"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.242539 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7fd689864c-5qb7v" podStartSLOduration=7.42203657 podStartE2EDuration="38.242512877s" podCreationTimestamp="2025-11-25 18:31:38 +0000 UTC" firstStartedPulling="2025-11-25 18:31:39.610082713 +0000 UTC m=+1129.995596318" lastFinishedPulling="2025-11-25 18:32:10.43055902 +0000 UTC m=+1160.816072625" observedRunningTime="2025-11-25 18:32:16.207568059 +0000 UTC m=+1166.593081664" watchObservedRunningTime="2025-11-25 18:32:16.242512877 +0000 UTC m=+1166.628026482"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.271090 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.271890 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.325167 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"]
Nov 25 18:32:16 crc kubenswrapper[4926]: I1125 18:32:16.382638 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"]
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.176087 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b","Type":"ContainerStarted","Data":"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd"}
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.176690 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-log" containerID="cri-o://feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c" gracePeriod=30
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.177298 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-httpd" containerID="cri-o://cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd" gracePeriod=30
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.184199 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bef25546-2579-4486-aa95-5dcd3a72c1f8","Type":"ContainerStarted","Data":"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb"}
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.184342 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-log" containerID="cri-o://62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89" gracePeriod=30
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.184365 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-httpd" containerID="cri-o://20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb" gracePeriod=30
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.191649 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" event={"ID":"673cc698-685c-4cfa-b777-3ea0b418ec78","Type":"ContainerStarted","Data":"5863d8d9506af073cfb36662e06645b349ffc1efac9f25630d7aba970161a592"}
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.191861 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6c57575f65-q94t4"
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.206549 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.206522808 podStartE2EDuration="7.206522808s" podCreationTimestamp="2025-11-25 18:32:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:17.194216406 +0000 UTC m=+1167.579730011" watchObservedRunningTime="2025-11-25 18:32:17.206522808 +0000 UTC m=+1167.592036403"
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.232601 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.232354814 podStartE2EDuration="7.232354814s" podCreationTimestamp="2025-11-25 18:32:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:17.222928335 +0000 UTC m=+1167.608441960" watchObservedRunningTime="2025-11-25 18:32:17.232354814 +0000 UTC m=+1167.617868419"
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.276834 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" podStartSLOduration=7.276813284 podStartE2EDuration="7.276813284s" podCreationTimestamp="2025-11-25 18:32:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:17.244052412 +0000 UTC m=+1167.629566007" watchObservedRunningTime="2025-11-25 18:32:17.276813284 +0000 UTC m=+1167.662326889"
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.886199 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.988849 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-combined-ca-bundle\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.989333 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlh2z\" (UniqueName: \"kubernetes.io/projected/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-kube-api-access-mlh2z\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.989405 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-config-data\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.989478 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-httpd-run\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.989517 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.989555 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-logs\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.989584 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-scripts\") pod \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\" (UID: \"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b\") "
Nov 25 18:32:17 crc kubenswrapper[4926]: I1125 18:32:17.997384 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.000492 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-logs" (OuterVolumeSpecName: "logs") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.024495 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-kube-api-access-mlh2z" (OuterVolumeSpecName: "kube-api-access-mlh2z") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "kube-api-access-mlh2z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.024673 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-scripts" (OuterVolumeSpecName: "scripts") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.033637 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.039475 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.086548 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.091661 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.091702 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.091714 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-logs\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.091724 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.091732 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.091742 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlh2z\" (UniqueName: \"kubernetes.io/projected/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-kube-api-access-mlh2z\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.114749 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.152258 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-config-data" (OuterVolumeSpecName: "config-data") pod "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" (UID: "27cdfb5f-7604-405b-b4cd-33eeb3b8c09b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193049 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193208 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-httpd-run\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193258 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-logs\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193284 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-combined-ca-bundle\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193407 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cfwx\" (UniqueName: \"kubernetes.io/projected/bef25546-2579-4486-aa95-5dcd3a72c1f8-kube-api-access-9cfwx\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193454 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-config-data\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193472 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-scripts\") pod \"bef25546-2579-4486-aa95-5dcd3a72c1f8\" (UID: \"bef25546-2579-4486-aa95-5dcd3a72c1f8\") "
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193929 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.193950 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.194631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.196754 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-logs" (OuterVolumeSpecName: "logs") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.211099 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef25546-2579-4486-aa95-5dcd3a72c1f8-kube-api-access-9cfwx" (OuterVolumeSpecName: "kube-api-access-9cfwx") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8"). InnerVolumeSpecName "kube-api-access-9cfwx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.211304 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.220462 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-scripts" (OuterVolumeSpecName: "scripts") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227358 4926 generic.go:334] "Generic (PLEG): container finished" podID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerID="20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb" exitCode=143
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227412 4926 generic.go:334] "Generic (PLEG): container finished" podID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerID="62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89" exitCode=143
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bef25546-2579-4486-aa95-5dcd3a72c1f8","Type":"ContainerDied","Data":"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb"}
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227492 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bef25546-2579-4486-aa95-5dcd3a72c1f8","Type":"ContainerDied","Data":"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89"}
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227507 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bef25546-2579-4486-aa95-5dcd3a72c1f8","Type":"ContainerDied","Data":"ef27fda2bf71c391436771a5e27fdd6fae1a7cf18114067cd6e64de409294f4a"}
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227522 4926 scope.go:117] "RemoveContainer" containerID="20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.227564 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.242888 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.247328 4926 generic.go:334] "Generic (PLEG): container finished" podID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerID="cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd" exitCode=143
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.247363 4926 generic.go:334] "Generic (PLEG): container finished" podID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerID="feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c" exitCode=143
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.247619 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" containerName="watcher-decision-engine" containerID="cri-o://f6e0cd719af7e50e84bd95403eb081833b04d346a00110ba117574648b791d8c" gracePeriod=30
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.248009 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.250604 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b","Type":"ContainerDied","Data":"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd"}
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.250649 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b","Type":"ContainerDied","Data":"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c"}
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.250667 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"27cdfb5f-7604-405b-b4cd-33eeb3b8c09b","Type":"ContainerDied","Data":"80280509b395057a299306f1210b8447cd19fb068e0ace79b3930315d9acf1a8"}
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.250876 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.250898 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-applier-0" podUID="67831a6a-38bf-4f8c-98a8-c8cb0274e218" containerName="watcher-applier" containerID="cri-o://3caad3e7c74cab7798c34f391f1cd40df720d56b8e0eca2a300db717507dd858" gracePeriod=30
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.275289 4926 scope.go:117] "RemoveContainer" containerID="62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89"
Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.286515 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-config-data" (OuterVolumeSpecName: "config-data") pod "bef25546-2579-4486-aa95-5dcd3a72c1f8" (UID: "bef25546-2579-4486-aa95-5dcd3a72c1f8").
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.301452 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305361 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cfwx\" (UniqueName: \"kubernetes.io/projected/bef25546-2579-4486-aa95-5dcd3a72c1f8-kube-api-access-9cfwx\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305425 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305438 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305468 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305480 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305489 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bef25546-2579-4486-aa95-5dcd3a72c1f8-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.305499 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bef25546-2579-4486-aa95-5dcd3a72c1f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.311835 4926 scope.go:117] "RemoveContainer" containerID="20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.312731 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb\": container with ID starting with 20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb not found: ID does not exist" containerID="20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.312780 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb"} err="failed to get container status \"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb\": rpc error: code = NotFound desc = could not find container \"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb\": container with ID starting with 20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.312832 4926 scope.go:117] "RemoveContainer" containerID="62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 
18:32:18.319528 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89\": container with ID starting with 62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89 not found: ID does not exist" containerID="62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.319584 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89"} err="failed to get container status \"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89\": rpc error: code = NotFound desc = could not find container \"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89\": container with ID starting with 62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89 not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.319623 4926 scope.go:117] "RemoveContainer" containerID="20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.320262 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb"} err="failed to get container status \"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb\": rpc error: code = NotFound desc = could not find container \"20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb\": container with ID starting with 20a691c19b16ca7646da8c3e8174591d2117f1006746625f4a25ed03170988cb not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.320307 4926 scope.go:117] "RemoveContainer" containerID="62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.320688 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89"} err="failed to get container status \"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89\": rpc error: code = NotFound desc = could not find container \"62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89\": container with ID starting with 62804744f5c863bda3ba20ae614e2fa38c263d64ab91828e7da1b4a7c4b52d89 not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.320712 4926 scope.go:117] "RemoveContainer" containerID="cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.327540 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.349917 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.375435 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" path="/var/lib/kubelet/pods/27cdfb5f-7604-405b-b4cd-33eeb3b8c09b/volumes" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376272 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 
18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.376641 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-httpd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376655 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-httpd" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.376673 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-httpd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376679 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-httpd" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.376704 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-log" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376711 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-log" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.376720 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-log" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376726 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-log" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376903 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-log" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376917 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" containerName="glance-httpd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376930 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-httpd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.376945 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cdfb5f-7604-405b-b4cd-33eeb3b8c09b" containerName="glance-log" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.377881 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.377961 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.380852 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.385325 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.391602 4926 scope.go:117] "RemoveContainer" containerID="feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.407754 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.426244 4926 scope.go:117] "RemoveContainer" containerID="cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.426736 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd\": container with ID starting with cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd not found: ID does not exist" containerID="cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.426766 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd"} err="failed to get container status \"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd\": rpc error: code = NotFound desc = could not find container \"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd\": container with ID starting with cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.426791 4926 scope.go:117] "RemoveContainer" containerID="feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c" Nov 25 18:32:18 crc kubenswrapper[4926]: E1125 18:32:18.427145 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c\": container with ID starting with feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c not found: ID does not exist" containerID="feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.427161 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c"} err="failed to get container status \"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c\": rpc error: code = NotFound desc = could not find container \"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c\": container with ID starting with feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.427174 4926 scope.go:117] "RemoveContainer" containerID="cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 
18:32:18.427427 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd"} err="failed to get container status \"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd\": rpc error: code = NotFound desc = could not find container \"cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd\": container with ID starting with cf00e49105cbeea4a3309931d8b115d604af855ca443a3b53362ce4cb11b7cbd not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.427442 4926 scope.go:117] "RemoveContainer" containerID="feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.427625 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c"} err="failed to get container status \"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c\": rpc error: code = NotFound desc = could not find container \"feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c\": container with ID starting with feb1ce7d7fe2f0079a281039aedf2c844779ffb8607dc604289a831acf5a2b2c not found: ID does not exist" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.509700 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8nx8\" (UniqueName: \"kubernetes.io/projected/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-kube-api-access-p8nx8\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510031 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510084 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510250 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510351 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-scripts\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510462 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-config-data\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510499 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.510568 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-logs\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.554005 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.565065 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.585381 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.587080 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.591353 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.592008 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612118 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8nx8\" (UniqueName: \"kubernetes.io/projected/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-kube-api-access-p8nx8\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612189 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612289 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " 
pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612322 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-scripts\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612354 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-config-data\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612391 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612435 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-logs\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.612930 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-logs\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.613558 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.616048 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.616995 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.628172 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-scripts\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.628300 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " 
pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.630670 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.630840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-config-data\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.639438 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8nx8\" (UniqueName: \"kubernetes.io/projected/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-kube-api-access-p8nx8\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.665170 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.710058 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.713962 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714011 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714039 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-logs\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714098 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714136 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714176 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77jh5\" (UniqueName: \"kubernetes.io/projected/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-kube-api-access-77jh5\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714204 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.714232 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815714 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77jh5\" (UniqueName: \"kubernetes.io/projected/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-kube-api-access-77jh5\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815815 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815842 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815873 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815898 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815921 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-logs\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.815980 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.816273 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.817636 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-logs\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.817784 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.823989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.824599 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.832473 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.840443 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77jh5\" (UniqueName: \"kubernetes.io/projected/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-kube-api-access-77jh5\") pod \"glance-default-internal-api-0\" 
(UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.841530 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.855322 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.857201 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.902331 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.902668 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 25 18:32:18 crc kubenswrapper[4926]: I1125 18:32:18.928459 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.288826 4926 generic.go:334] "Generic (PLEG): container finished" podID="67831a6a-38bf-4f8c-98a8-c8cb0274e218" containerID="3caad3e7c74cab7798c34f391f1cd40df720d56b8e0eca2a300db717507dd858" exitCode=0 Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.290165 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"67831a6a-38bf-4f8c-98a8-c8cb0274e218","Type":"ContainerDied","Data":"3caad3e7c74cab7798c34f391f1cd40df720d56b8e0eca2a300db717507dd858"} Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.290353 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.367639 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.450771 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.621011 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:32:19 crc kubenswrapper[4926]: W1125 18:32:19.629817 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cbf5bf6_4cf7_42bd_8f71_89237d6dc841.slice/crio-c3e55a393cfa0ddf7b8bccf4e039290d6e8e6719b33e2cb232c4706ede868ccd WatchSource:0}: Error finding container c3e55a393cfa0ddf7b8bccf4e039290d6e8e6719b33e2cb232c4706ede868ccd: Status 404 returned error can't find the container with id c3e55a393cfa0ddf7b8bccf4e039290d6e8e6719b33e2cb232c4706ede868ccd Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.641440 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Nov 25 18:32:19 crc kubenswrapper[4926]: E1125 18:32:19.761335 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2205cef_0292_4e62_b282_0acd7f50f920.slice/crio-3f0799115ea5976d342803d3ec287b7e11216070aa59088280c0f0799da32aef.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2205cef_0292_4e62_b282_0acd7f50f920.slice/crio-conmon-3f0799115ea5976d342803d3ec287b7e11216070aa59088280c0f0799da32aef.scope\": RecentStats: unable to find data in memory cache]" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.772153 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-config-data\") pod \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.772266 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbhsm\" (UniqueName: \"kubernetes.io/projected/67831a6a-38bf-4f8c-98a8-c8cb0274e218-kube-api-access-nbhsm\") pod \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.772347 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67831a6a-38bf-4f8c-98a8-c8cb0274e218-logs\") pod \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.772516 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-combined-ca-bundle\") pod \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\" (UID: \"67831a6a-38bf-4f8c-98a8-c8cb0274e218\") " Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.773137 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67831a6a-38bf-4f8c-98a8-c8cb0274e218-logs" (OuterVolumeSpecName: "logs") pod "67831a6a-38bf-4f8c-98a8-c8cb0274e218" (UID: "67831a6a-38bf-4f8c-98a8-c8cb0274e218"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.781248 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67831a6a-38bf-4f8c-98a8-c8cb0274e218-kube-api-access-nbhsm" (OuterVolumeSpecName: "kube-api-access-nbhsm") pod "67831a6a-38bf-4f8c-98a8-c8cb0274e218" (UID: "67831a6a-38bf-4f8c-98a8-c8cb0274e218"). InnerVolumeSpecName "kube-api-access-nbhsm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.875472 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbhsm\" (UniqueName: \"kubernetes.io/projected/67831a6a-38bf-4f8c-98a8-c8cb0274e218-kube-api-access-nbhsm\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.875504 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67831a6a-38bf-4f8c-98a8-c8cb0274e218-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.891487 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-config-data" (OuterVolumeSpecName: "config-data") pod "67831a6a-38bf-4f8c-98a8-c8cb0274e218" (UID: "67831a6a-38bf-4f8c-98a8-c8cb0274e218"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.944589 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67831a6a-38bf-4f8c-98a8-c8cb0274e218" (UID: "67831a6a-38bf-4f8c-98a8-c8cb0274e218"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.948638 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/watcher-api-0" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.979088 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:19 crc kubenswrapper[4926]: I1125 18:32:19.979131 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67831a6a-38bf-4f8c-98a8-c8cb0274e218-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.318031 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.318027 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"67831a6a-38bf-4f8c-98a8-c8cb0274e218","Type":"ContainerDied","Data":"d02d55465204343c01ee7a14aaec70e612220e748441d6f1c435527067817635"} Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.318460 4926 scope.go:117] "RemoveContainer" containerID="3caad3e7c74cab7798c34f391f1cd40df720d56b8e0eca2a300db717507dd858" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.325006 4926 generic.go:334] "Generic (PLEG): container finished" podID="de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" containerID="f6e0cd719af7e50e84bd95403eb081833b04d346a00110ba117574648b791d8c" exitCode=1 Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.325050 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9","Type":"ContainerDied","Data":"f6e0cd719af7e50e84bd95403eb081833b04d346a00110ba117574648b791d8c"} Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.333312 4926 generic.go:334] "Generic (PLEG): container finished" podID="f2205cef-0292-4e62-b282-0acd7f50f920" containerID="3f0799115ea5976d342803d3ec287b7e11216070aa59088280c0f0799da32aef" exitCode=0 Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.348409 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bef25546-2579-4486-aa95-5dcd3a72c1f8" path="/var/lib/kubelet/pods/bef25546-2579-4486-aa95-5dcd3a72c1f8/volumes" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.350049 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-52ncr" event={"ID":"f2205cef-0292-4e62-b282-0acd7f50f920","Type":"ContainerDied","Data":"3f0799115ea5976d342803d3ec287b7e11216070aa59088280c0f0799da32aef"} Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.350191 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"18b3f2f7-c24a-4cbe-af19-4a124a7b393e","Type":"ContainerStarted","Data":"6458834468da38cef67554ca0958ca0d3c4b9c0653024cbf500ef0abaf350fab"} Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.350274 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841","Type":"ContainerStarted","Data":"c3e55a393cfa0ddf7b8bccf4e039290d6e8e6719b33e2cb232c4706ede868ccd"} Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.419301 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.430569 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.435097 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:32:20 crc kubenswrapper[4926]: E1125 18:32:20.435485 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67831a6a-38bf-4f8c-98a8-c8cb0274e218" containerName="watcher-applier" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.435497 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="67831a6a-38bf-4f8c-98a8-c8cb0274e218" containerName="watcher-applier" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.435682 4926 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="67831a6a-38bf-4f8c-98a8-c8cb0274e218" containerName="watcher-applier" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.436263 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.439020 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.457482 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.591366 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk85x\" (UniqueName: \"kubernetes.io/projected/2f870c30-7159-4613-a2a3-bee7bf700ac8-kube-api-access-zk85x\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.591427 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f870c30-7159-4613-a2a3-bee7bf700ac8-logs\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.591465 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f870c30-7159-4613-a2a3-bee7bf700ac8-config-data\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.591652 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f870c30-7159-4613-a2a3-bee7bf700ac8-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.694087 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f870c30-7159-4613-a2a3-bee7bf700ac8-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.694681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk85x\" (UniqueName: \"kubernetes.io/projected/2f870c30-7159-4613-a2a3-bee7bf700ac8-kube-api-access-zk85x\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.694718 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f870c30-7159-4613-a2a3-bee7bf700ac8-logs\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.694761 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f870c30-7159-4613-a2a3-bee7bf700ac8-config-data\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " 
pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.697306 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f870c30-7159-4613-a2a3-bee7bf700ac8-logs\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.701127 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f870c30-7159-4613-a2a3-bee7bf700ac8-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.701626 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f870c30-7159-4613-a2a3-bee7bf700ac8-config-data\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.713591 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk85x\" (UniqueName: \"kubernetes.io/projected/2f870c30-7159-4613-a2a3-bee7bf700ac8-kube-api-access-zk85x\") pod \"watcher-applier-0\" (UID: \"2f870c30-7159-4613-a2a3-bee7bf700ac8\") " pod="openstack/watcher-applier-0" Nov 25 18:32:20 crc kubenswrapper[4926]: I1125 18:32:20.798707 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 25 18:32:21 crc kubenswrapper[4926]: I1125 18:32:21.371497 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841","Type":"ContainerStarted","Data":"52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca"} Nov 25 18:32:21 crc kubenswrapper[4926]: I1125 18:32:21.377446 4926 generic.go:334] "Generic (PLEG): container finished" podID="4addbd96-e765-4c9c-b260-5a80700849d2" containerID="ceac441c09b1ab45b04be4d908d2fa69eb4a3016e0432b031f87cc78b7d3ce19" exitCode=0 Nov 25 18:32:21 crc kubenswrapper[4926]: I1125 18:32:21.377511 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pvqdw" event={"ID":"4addbd96-e765-4c9c-b260-5a80700849d2","Type":"ContainerDied","Data":"ceac441c09b1ab45b04be4d908d2fa69eb4a3016e0432b031f87cc78b7d3ce19"} Nov 25 18:32:21 crc kubenswrapper[4926]: I1125 18:32:21.382454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"18b3f2f7-c24a-4cbe-af19-4a124a7b393e","Type":"ContainerStarted","Data":"ac46ce64187e0857cfede484e0b57b8c0d0af2323862d2086ca42f29b7e939e5"} Nov 25 18:32:22 crc kubenswrapper[4926]: I1125 18:32:22.347346 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67831a6a-38bf-4f8c-98a8-c8cb0274e218" path="/var/lib/kubelet/pods/67831a6a-38bf-4f8c-98a8-c8cb0274e218/volumes" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.151631 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-pvqdw" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.155719 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.164459 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.275975 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-config-data\") pod \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276028 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4addbd96-e765-4c9c-b260-5a80700849d2-logs\") pod \"4addbd96-e765-4c9c-b260-5a80700849d2\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276100 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-config-data\") pod \"4addbd96-e765-4c9c-b260-5a80700849d2\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276121 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-config-data\") pod \"f2205cef-0292-4e62-b282-0acd7f50f920\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276139 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-combined-ca-bundle\") pod \"4addbd96-e765-4c9c-b260-5a80700849d2\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276168 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-scripts\") pod \"4addbd96-e765-4c9c-b260-5a80700849d2\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276229 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-combined-ca-bundle\") pod \"f2205cef-0292-4e62-b282-0acd7f50f920\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276246 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmscx\" (UniqueName: \"kubernetes.io/projected/f2205cef-0292-4e62-b282-0acd7f50f920-kube-api-access-hmscx\") pod \"f2205cef-0292-4e62-b282-0acd7f50f920\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276301 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8d9t\" (UniqueName: \"kubernetes.io/projected/4addbd96-e765-4c9c-b260-5a80700849d2-kube-api-access-n8d9t\") pod \"4addbd96-e765-4c9c-b260-5a80700849d2\" (UID: \"4addbd96-e765-4c9c-b260-5a80700849d2\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276329 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-logs\") pod \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276363 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-custom-prometheus-ca\") pod \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276409 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skk2n\" (UniqueName: \"kubernetes.io/projected/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-kube-api-access-skk2n\") pod \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276430 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-credential-keys\") pod \"f2205cef-0292-4e62-b282-0acd7f50f920\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276456 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-combined-ca-bundle\") pod \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\" (UID: \"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-scripts\") pod \"f2205cef-0292-4e62-b282-0acd7f50f920\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.276515 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-fernet-keys\") pod \"f2205cef-0292-4e62-b282-0acd7f50f920\" (UID: \"f2205cef-0292-4e62-b282-0acd7f50f920\") " Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.287136 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4addbd96-e765-4c9c-b260-5a80700849d2-logs" (OuterVolumeSpecName: "logs") pod "4addbd96-e765-4c9c-b260-5a80700849d2" (UID: "4addbd96-e765-4c9c-b260-5a80700849d2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.291040 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f2205cef-0292-4e62-b282-0acd7f50f920" (UID: "f2205cef-0292-4e62-b282-0acd7f50f920"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.298527 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-scripts" (OuterVolumeSpecName: "scripts") pod "4addbd96-e765-4c9c-b260-5a80700849d2" (UID: "4addbd96-e765-4c9c-b260-5a80700849d2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.298532 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-scripts" (OuterVolumeSpecName: "scripts") pod "f2205cef-0292-4e62-b282-0acd7f50f920" (UID: "f2205cef-0292-4e62-b282-0acd7f50f920"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.300359 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-logs" (OuterVolumeSpecName: "logs") pod "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" (UID: "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.306548 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4addbd96-e765-4c9c-b260-5a80700849d2-kube-api-access-n8d9t" (OuterVolumeSpecName: "kube-api-access-n8d9t") pod "4addbd96-e765-4c9c-b260-5a80700849d2" (UID: "4addbd96-e765-4c9c-b260-5a80700849d2"). InnerVolumeSpecName "kube-api-access-n8d9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.315220 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-kube-api-access-skk2n" (OuterVolumeSpecName: "kube-api-access-skk2n") pod "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" (UID: "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9"). InnerVolumeSpecName "kube-api-access-skk2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.321623 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "f2205cef-0292-4e62-b282-0acd7f50f920" (UID: "f2205cef-0292-4e62-b282-0acd7f50f920"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.326145 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2205cef-0292-4e62-b282-0acd7f50f920-kube-api-access-hmscx" (OuterVolumeSpecName: "kube-api-access-hmscx") pod "f2205cef-0292-4e62-b282-0acd7f50f920" (UID: "f2205cef-0292-4e62-b282-0acd7f50f920"). InnerVolumeSpecName "kube-api-access-hmscx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.329023 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-config-data" (OuterVolumeSpecName: "config-data") pod "4addbd96-e765-4c9c-b260-5a80700849d2" (UID: "4addbd96-e765-4c9c-b260-5a80700849d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.338193 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-config-data" (OuterVolumeSpecName: "config-data") pod "f2205cef-0292-4e62-b282-0acd7f50f920" (UID: "f2205cef-0292-4e62-b282-0acd7f50f920"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.352494 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4addbd96-e765-4c9c-b260-5a80700849d2" (UID: "4addbd96-e765-4c9c-b260-5a80700849d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.361166 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" (UID: "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.366526 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" (UID: "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379526 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379577 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379589 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379599 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmscx\" (UniqueName: \"kubernetes.io/projected/f2205cef-0292-4e62-b282-0acd7f50f920-kube-api-access-hmscx\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379607 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8d9t\" (UniqueName: \"kubernetes.io/projected/4addbd96-e765-4c9c-b260-5a80700849d2-kube-api-access-n8d9t\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379639 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379650 4926 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379658 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skk2n\" (UniqueName: \"kubernetes.io/projected/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-kube-api-access-skk2n\") on node \"crc\" DevicePath 
\"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379668 4926 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379678 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379685 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379692 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379724 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4addbd96-e765-4c9c-b260-5a80700849d2-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.379734 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4addbd96-e765-4c9c-b260-5a80700849d2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.387172 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2205cef-0292-4e62-b282-0acd7f50f920" (UID: "f2205cef-0292-4e62-b282-0acd7f50f920"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.398525 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-config-data" (OuterVolumeSpecName: "config-data") pod "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" (UID: "de1e3a0d-e8d3-4300-be4e-62f3b8a851c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.420421 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-52ncr" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.422200 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-pvqdw" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.423633 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.483853 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-52ncr" event={"ID":"f2205cef-0292-4e62-b282-0acd7f50f920","Type":"ContainerDied","Data":"896708d999cada4601c257280a0b02d31a51faf55b707f530c2b4f8582054471"} Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.483914 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="896708d999cada4601c257280a0b02d31a51faf55b707f530c2b4f8582054471" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.483928 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-pvqdw" event={"ID":"4addbd96-e765-4c9c-b260-5a80700849d2","Type":"ContainerDied","Data":"88e9957e39cb384b940c1bff5a9b40ae1df9cf0326df966a9b91c7680fd65ff9"} Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.483941 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88e9957e39cb384b940c1bff5a9b40ae1df9cf0326df966a9b91c7680fd65ff9" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.483952 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"de1e3a0d-e8d3-4300-be4e-62f3b8a851c9","Type":"ContainerDied","Data":"390cc830c2f770a5cb6c44a7a432ab3f8df85a276ac91d2ad1130768c748eb82"} Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.484817 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.484876 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2205cef-0292-4e62-b282-0acd7f50f920-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.531031 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.548152 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.589430 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:32:24 crc kubenswrapper[4926]: E1125 18:32:24.589863 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4addbd96-e765-4c9c-b260-5a80700849d2" containerName="placement-db-sync" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.589881 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4addbd96-e765-4c9c-b260-5a80700849d2" containerName="placement-db-sync" Nov 25 18:32:24 crc kubenswrapper[4926]: E1125 18:32:24.589905 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2205cef-0292-4e62-b282-0acd7f50f920" containerName="keystone-bootstrap" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.589911 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2205cef-0292-4e62-b282-0acd7f50f920" containerName="keystone-bootstrap" Nov 25 18:32:24 crc kubenswrapper[4926]: E1125 18:32:24.589938 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" containerName="watcher-decision-engine" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.589945 4926 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" containerName="watcher-decision-engine" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.590116 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" containerName="watcher-decision-engine" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.590141 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2205cef-0292-4e62-b282-0acd7f50f920" containerName="keystone-bootstrap" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.590153 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4addbd96-e765-4c9c-b260-5a80700849d2" containerName="placement-db-sync" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.590828 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.593551 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.597546 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.690967 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.691093 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.691142 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-config-data\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.691325 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f664100-2926-4e80-a06e-5c09021eb736-logs\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.691427 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jq72\" (UniqueName: \"kubernetes.io/projected/9f664100-2926-4e80-a06e-5c09021eb736-kube-api-access-4jq72\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.793392 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-combined-ca-bundle\") pod \"watcher-decision-engine-0\" 
(UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.793468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-config-data\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.793544 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f664100-2926-4e80-a06e-5c09021eb736-logs\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.793621 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jq72\" (UniqueName: \"kubernetes.io/projected/9f664100-2926-4e80-a06e-5c09021eb736-kube-api-access-4jq72\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.793661 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.794587 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f664100-2926-4e80-a06e-5c09021eb736-logs\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.801584 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-config-data\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.801785 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.802035 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.817389 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jq72\" (UniqueName: \"kubernetes.io/projected/9f664100-2926-4e80-a06e-5c09021eb736-kube-api-access-4jq72\") pod \"watcher-decision-engine-0\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:32:24 crc kubenswrapper[4926]: I1125 18:32:24.912975 
4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.276385 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-547c7d8d4d-wq9d8"] Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.278385 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.284219 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.284244 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.284706 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.284726 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.284804 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wzmlf" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.308199 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-547c7d8d4d-wq9d8"] Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.319453 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.319621 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.321036 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.161:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.161:8443: connect: connection refused" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.371213 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7dc866d94f-flgn2"] Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.372537 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.377832 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.377950 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.378047 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.378242 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.378304 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.378489 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2fxt6" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.382647 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7dc866d94f-flgn2"] Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415350 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-scripts\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415407 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-config-data\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415443 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-internal-tls-certs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415486 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n7r7\" (UniqueName: \"kubernetes.io/projected/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-kube-api-access-5n7r7\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415524 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-logs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415607 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-combined-ca-bundle\") pod \"placement-547c7d8d4d-wq9d8\" (UID: 
\"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.415637 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-public-tls-certs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.431253 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.431852 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.433487 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-58ffdb7978-lnv9j" podUID="f0edd267-7b26-44cc-a576-552e8ff49e66" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.517815 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-scripts\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.517874 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-internal-tls-certs\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.517915 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-combined-ca-bundle\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.517938 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-scripts\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.517959 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-config-data\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.517997 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-internal-tls-certs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " 
pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518016 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t82q\" (UniqueName: \"kubernetes.io/projected/33f80090-20c8-407b-86ae-7ba88229140d-kube-api-access-2t82q\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518036 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-public-tls-certs\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518066 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n7r7\" (UniqueName: \"kubernetes.io/projected/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-kube-api-access-5n7r7\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518115 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-credential-keys\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518133 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-logs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518163 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-fernet-keys\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-combined-ca-bundle\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518618 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-config-data\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.518658 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-public-tls-certs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " 
pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.522027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-logs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.527411 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-config-data\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.527623 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-scripts\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.527810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-internal-tls-certs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.528618 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-public-tls-certs\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.534063 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-combined-ca-bundle\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.554074 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n7r7\" (UniqueName: \"kubernetes.io/projected/89428ba2-a2c6-40eb-9e3f-878ebf7193c1-kube-api-access-5n7r7\") pod \"placement-547c7d8d4d-wq9d8\" (UID: \"89428ba2-a2c6-40eb-9e3f-878ebf7193c1\") " pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.619814 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620509 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-config-data\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620606 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-scripts\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620650 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-internal-tls-certs\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620706 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-combined-ca-bundle\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620764 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t82q\" (UniqueName: \"kubernetes.io/projected/33f80090-20c8-407b-86ae-7ba88229140d-kube-api-access-2t82q\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620796 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-public-tls-certs\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620868 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-credential-keys\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.620933 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-fernet-keys\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.626291 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-public-tls-certs\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 
18:32:25.626950 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-combined-ca-bundle\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.630213 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-internal-tls-certs\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.630280 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-config-data\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.631095 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-credential-keys\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.632862 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-fernet-keys\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.635966 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33f80090-20c8-407b-86ae-7ba88229140d-scripts\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.639202 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t82q\" (UniqueName: \"kubernetes.io/projected/33f80090-20c8-407b-86ae-7ba88229140d-kube-api-access-2t82q\") pod \"keystone-7dc866d94f-flgn2\" (UID: \"33f80090-20c8-407b-86ae-7ba88229140d\") " pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.695880 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:25 crc kubenswrapper[4926]: I1125 18:32:25.937118 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.005151 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6489b9cb77-sskkb"] Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.005504 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="dnsmasq-dns" containerID="cri-o://04ecc17731a8f7003a6eeaa3ad4ab169febc4fffb25b0ad3ba7b22cc17c10607" gracePeriod=10 Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.349172 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de1e3a0d-e8d3-4300-be4e-62f3b8a851c9" path="/var/lib/kubelet/pods/de1e3a0d-e8d3-4300-be4e-62f3b8a851c9/volumes" Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.462532 4926 generic.go:334] "Generic (PLEG): container finished" podID="3f8be116-3710-4f26-bde2-348f96675a2c" containerID="04ecc17731a8f7003a6eeaa3ad4ab169febc4fffb25b0ad3ba7b22cc17c10607" exitCode=0 Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.462578 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" event={"ID":"3f8be116-3710-4f26-bde2-348f96675a2c","Type":"ContainerDied","Data":"04ecc17731a8f7003a6eeaa3ad4ab169febc4fffb25b0ad3ba7b22cc17c10607"} Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.551264 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.159:5353: connect: connection refused" Nov 25 18:32:26 crc kubenswrapper[4926]: I1125 18:32:26.984738 4926 scope.go:117] "RemoveContainer" containerID="f6e0cd719af7e50e84bd95403eb081833b04d346a00110ba117574648b791d8c" Nov 25 18:32:28 crc kubenswrapper[4926]: I1125 18:32:28.900243 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 25 18:32:28 crc kubenswrapper[4926]: I1125 18:32:28.910677 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.529055 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" event={"ID":"3f8be116-3710-4f26-bde2-348f96675a2c","Type":"ContainerDied","Data":"dfbc5abc69f35fd3f8ca18dd647f0195e0bd730afec376c2c5a888f0aec3f2e8"} Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.529505 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dfbc5abc69f35fd3f8ca18dd647f0195e0bd730afec376c2c5a888f0aec3f2e8" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.646761 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.734238 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lk9d\" (UniqueName: \"kubernetes.io/projected/3f8be116-3710-4f26-bde2-348f96675a2c-kube-api-access-4lk9d\") pod \"3f8be116-3710-4f26-bde2-348f96675a2c\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.734676 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-swift-storage-0\") pod \"3f8be116-3710-4f26-bde2-348f96675a2c\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.734720 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-config\") pod \"3f8be116-3710-4f26-bde2-348f96675a2c\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.734917 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-svc\") pod \"3f8be116-3710-4f26-bde2-348f96675a2c\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.735010 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-nb\") pod \"3f8be116-3710-4f26-bde2-348f96675a2c\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.735053 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-sb\") pod \"3f8be116-3710-4f26-bde2-348f96675a2c\" (UID: \"3f8be116-3710-4f26-bde2-348f96675a2c\") " Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.740255 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f8be116-3710-4f26-bde2-348f96675a2c-kube-api-access-4lk9d" (OuterVolumeSpecName: "kube-api-access-4lk9d") pod "3f8be116-3710-4f26-bde2-348f96675a2c" (UID: "3f8be116-3710-4f26-bde2-348f96675a2c"). InnerVolumeSpecName "kube-api-access-4lk9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.792350 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3f8be116-3710-4f26-bde2-348f96675a2c" (UID: "3f8be116-3710-4f26-bde2-348f96675a2c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.806839 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3f8be116-3710-4f26-bde2-348f96675a2c" (UID: "3f8be116-3710-4f26-bde2-348f96675a2c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.811455 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-config" (OuterVolumeSpecName: "config") pod "3f8be116-3710-4f26-bde2-348f96675a2c" (UID: "3f8be116-3710-4f26-bde2-348f96675a2c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.818103 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3f8be116-3710-4f26-bde2-348f96675a2c" (UID: "3f8be116-3710-4f26-bde2-348f96675a2c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.837438 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.837467 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.837477 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lk9d\" (UniqueName: \"kubernetes.io/projected/3f8be116-3710-4f26-bde2-348f96675a2c-kube-api-access-4lk9d\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.837489 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.837499 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.866131 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3f8be116-3710-4f26-bde2-348f96675a2c" (UID: "3f8be116-3710-4f26-bde2-348f96675a2c"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.874711 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 25 18:32:29 crc kubenswrapper[4926]: W1125 18:32:29.890270 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f870c30_7159_4613_a2a3_bee7bf700ac8.slice/crio-c95a186a18703db3fe68e9dae61506c69fe1192f9df9917dfdf6f20a26a62446 WatchSource:0}: Error finding container c95a186a18703db3fe68e9dae61506c69fe1192f9df9917dfdf6f20a26a62446: Status 404 returned error can't find the container with id c95a186a18703db3fe68e9dae61506c69fe1192f9df9917dfdf6f20a26a62446 Nov 25 18:32:29 crc kubenswrapper[4926]: I1125 18:32:29.938945 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3f8be116-3710-4f26-bde2-348f96675a2c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.080952 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.178828 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-547c7d8d4d-wq9d8"] Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.201699 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7dc866d94f-flgn2"] Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.595439 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerStarted","Data":"5807d3d885f8cd1c4a345edd91ce53d054005453d20263b419aa19f25f38c7ac"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.595694 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerStarted","Data":"11eb4fa6a1b17809506f3d2b5eeba6a3c4b1011fa8100da7126a7bab8c986efe"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.599106 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x8d9g" event={"ID":"017caf97-9f18-49b8-b6e6-597c709e3420","Type":"ContainerStarted","Data":"7b0eb78ad49b0f6308da6f264521dae46391514392d5ea5580ac14a62c0fd47f"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.609058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerStarted","Data":"7a921d4be478da69a6cb59d15f69a0232d6a23c2d93d18a19ead11cedde6bee8"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.623595 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"18b3f2f7-c24a-4cbe-af19-4a124a7b393e","Type":"ContainerStarted","Data":"1ab43ba372074d6a02b0f693a9b2a90660d4d8877b41f35e8307d4e28bc52f2c"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.632077 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-547c7d8d4d-wq9d8" event={"ID":"89428ba2-a2c6-40eb-9e3f-878ebf7193c1","Type":"ContainerStarted","Data":"ae1a730d6e127ae55bf8ae408a9a9bfcf44c878bbbeff6b832b511fc55b922ca"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.641634 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7dc866d94f-flgn2" 
event={"ID":"33f80090-20c8-407b-86ae-7ba88229140d","Type":"ContainerStarted","Data":"d0fad479314230acbcd2ae39a292e76ea345d3c796cb125307f6e372b31e95b8"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.645636 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-x8d9g" podStartSLOduration=3.834394806 podStartE2EDuration="55.645621747s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="2025-11-25 18:31:37.678117881 +0000 UTC m=+1128.063631486" lastFinishedPulling="2025-11-25 18:32:29.489344832 +0000 UTC m=+1179.874858427" observedRunningTime="2025-11-25 18:32:30.641030901 +0000 UTC m=+1181.026544506" watchObservedRunningTime="2025-11-25 18:32:30.645621747 +0000 UTC m=+1181.031135352" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.647039 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6489b9cb77-sskkb" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.648140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"2f870c30-7159-4613-a2a3-bee7bf700ac8","Type":"ContainerStarted","Data":"17e70220c63674d55d1203d4528e799230cb5d87c29831336e2f2343f1bc8817"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.648169 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"2f870c30-7159-4613-a2a3-bee7bf700ac8","Type":"ContainerStarted","Data":"c95a186a18703db3fe68e9dae61506c69fe1192f9df9917dfdf6f20a26a62446"} Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.651559 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=6.651547378 podStartE2EDuration="6.651547378s" podCreationTimestamp="2025-11-25 18:32:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:30.623628029 +0000 UTC m=+1181.009141634" watchObservedRunningTime="2025-11-25 18:32:30.651547378 +0000 UTC m=+1181.037060983" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.669038 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=12.669022832 podStartE2EDuration="12.669022832s" podCreationTimestamp="2025-11-25 18:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:30.659224203 +0000 UTC m=+1181.044737808" watchObservedRunningTime="2025-11-25 18:32:30.669022832 +0000 UTC m=+1181.054536437" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.683739 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6489b9cb77-sskkb"] Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.711696 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6489b9cb77-sskkb"] Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.720279 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=10.720263554 podStartE2EDuration="10.720263554s" podCreationTimestamp="2025-11-25 18:32:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:30.713783289 +0000 UTC m=+1181.099296894" watchObservedRunningTime="2025-11-25 
18:32:30.720263554 +0000 UTC m=+1181.105777159" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.799794 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.799837 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Nov 25 18:32:30 crc kubenswrapper[4926]: I1125 18:32:30.929795 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.691872 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jnbt6" event={"ID":"0138f286-e018-42de-b145-2cda09144394","Type":"ContainerStarted","Data":"520ea0e9d8dd3748068d6b0c4b1f21c3312cb88dd7caa9da843d0bd092df244a"} Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.700797 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-547c7d8d4d-wq9d8" event={"ID":"89428ba2-a2c6-40eb-9e3f-878ebf7193c1","Type":"ContainerStarted","Data":"d516bc6f9c0ce00993f89675e4e06382385b2a3dff4f2bfa82cc199791438362"} Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.700832 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-547c7d8d4d-wq9d8" event={"ID":"89428ba2-a2c6-40eb-9e3f-878ebf7193c1","Type":"ContainerStarted","Data":"da91e7ccce56f6f286bea2ee71642ba85c3ffbee2afbdec5b524967de2dac05b"} Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.701592 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.701614 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.711696 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7dc866d94f-flgn2" event={"ID":"33f80090-20c8-407b-86ae-7ba88229140d","Type":"ContainerStarted","Data":"155f63a0b18cd332ef067785284df3cf38e9ebec15b54f02df2e279fc1ab1602"} Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.712391 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.713663 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-jnbt6" podStartSLOduration=4.598576252 podStartE2EDuration="56.713644252s" podCreationTimestamp="2025-11-25 18:31:35 +0000 UTC" firstStartedPulling="2025-11-25 18:31:37.356703275 +0000 UTC m=+1127.742216890" lastFinishedPulling="2025-11-25 18:32:29.471771295 +0000 UTC m=+1179.857284890" observedRunningTime="2025-11-25 18:32:31.712604665 +0000 UTC m=+1182.098118270" watchObservedRunningTime="2025-11-25 18:32:31.713644252 +0000 UTC m=+1182.099157857" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.731417 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841","Type":"ContainerStarted","Data":"dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819"} Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.783022 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-547c7d8d4d-wq9d8" podStartSLOduration=6.783000583 podStartE2EDuration="6.783000583s" podCreationTimestamp="2025-11-25 18:32:25 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:31.751590725 +0000 UTC m=+1182.137104330" watchObservedRunningTime="2025-11-25 18:32:31.783000583 +0000 UTC m=+1182.168514188" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.819174 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.837722 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=13.837700763 podStartE2EDuration="13.837700763s" podCreationTimestamp="2025-11-25 18:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:31.829433173 +0000 UTC m=+1182.214946778" watchObservedRunningTime="2025-11-25 18:32:31.837700763 +0000 UTC m=+1182.223214368" Nov 25 18:32:31 crc kubenswrapper[4926]: I1125 18:32:31.838537 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7dc866d94f-flgn2" podStartSLOduration=6.838530744 podStartE2EDuration="6.838530744s" podCreationTimestamp="2025-11-25 18:32:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:31.784239305 +0000 UTC m=+1182.169752910" watchObservedRunningTime="2025-11-25 18:32:31.838530744 +0000 UTC m=+1182.224044349" Nov 25 18:32:32 crc kubenswrapper[4926]: I1125 18:32:32.344342 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" path="/var/lib/kubelet/pods/3f8be116-3710-4f26-bde2-348f96675a2c/volumes" Nov 25 18:32:32 crc kubenswrapper[4926]: I1125 18:32:32.748144 4926 generic.go:334] "Generic (PLEG): container finished" podID="434bea04-3768-493f-8d01-36f9c41bc811" containerID="c532dd2be7b122443980153da9bb60983ebf9d3d9ac7994e8762e25ecbf02b10" exitCode=0 Nov 25 18:32:32 crc kubenswrapper[4926]: I1125 18:32:32.748299 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-stgf4" event={"ID":"434bea04-3768-493f-8d01-36f9c41bc811","Type":"ContainerDied","Data":"c532dd2be7b122443980153da9bb60983ebf9d3d9ac7994e8762e25ecbf02b10"} Nov 25 18:32:32 crc kubenswrapper[4926]: I1125 18:32:32.848186 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:32 crc kubenswrapper[4926]: I1125 18:32:32.848456 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api-log" containerID="cri-o://007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6" gracePeriod=30 Nov 25 18:32:32 crc kubenswrapper[4926]: I1125 18:32:32.848541 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api" containerID="cri-o://7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719" gracePeriod=30 Nov 25 18:32:33 crc kubenswrapper[4926]: I1125 18:32:33.763301 4926 generic.go:334] "Generic (PLEG): container finished" podID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerID="007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6" exitCode=143 Nov 25 18:32:33 crc kubenswrapper[4926]: I1125 
18:32:33.763362 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b757a4b3-7f06-4254-b5ca-86f0b0d05234","Type":"ContainerDied","Data":"007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6"} Nov 25 18:32:33 crc kubenswrapper[4926]: I1125 18:32:33.892133 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.163:9322/\": dial tcp 10.217.0.163:9322: connect: connection refused" Nov 25 18:32:33 crc kubenswrapper[4926]: I1125 18:32:33.892428 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9322/\": dial tcp 10.217.0.163:9322: connect: connection refused" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.256929 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-stgf4" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.361890 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-config\") pod \"434bea04-3768-493f-8d01-36f9c41bc811\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.362073 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-combined-ca-bundle\") pod \"434bea04-3768-493f-8d01-36f9c41bc811\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.362229 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwx25\" (UniqueName: \"kubernetes.io/projected/434bea04-3768-493f-8d01-36f9c41bc811-kube-api-access-kwx25\") pod \"434bea04-3768-493f-8d01-36f9c41bc811\" (UID: \"434bea04-3768-493f-8d01-36f9c41bc811\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.369088 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/434bea04-3768-493f-8d01-36f9c41bc811-kube-api-access-kwx25" (OuterVolumeSpecName: "kube-api-access-kwx25") pod "434bea04-3768-493f-8d01-36f9c41bc811" (UID: "434bea04-3768-493f-8d01-36f9c41bc811"). InnerVolumeSpecName "kube-api-access-kwx25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.373506 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.389108 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-config" (OuterVolumeSpecName: "config") pod "434bea04-3768-493f-8d01-36f9c41bc811" (UID: "434bea04-3768-493f-8d01-36f9c41bc811"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.393472 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "434bea04-3768-493f-8d01-36f9c41bc811" (UID: "434bea04-3768-493f-8d01-36f9c41bc811"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.464917 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-custom-prometheus-ca\") pod \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.464969 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-combined-ca-bundle\") pod \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.464999 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgfmv\" (UniqueName: \"kubernetes.io/projected/b757a4b3-7f06-4254-b5ca-86f0b0d05234-kube-api-access-pgfmv\") pod \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.465112 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-config-data\") pod \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.465133 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b757a4b3-7f06-4254-b5ca-86f0b0d05234-logs\") pod \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\" (UID: \"b757a4b3-7f06-4254-b5ca-86f0b0d05234\") " Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.465621 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.465637 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/434bea04-3768-493f-8d01-36f9c41bc811-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.465650 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwx25\" (UniqueName: \"kubernetes.io/projected/434bea04-3768-493f-8d01-36f9c41bc811-kube-api-access-kwx25\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.466327 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b757a4b3-7f06-4254-b5ca-86f0b0d05234-logs" (OuterVolumeSpecName: "logs") pod "b757a4b3-7f06-4254-b5ca-86f0b0d05234" (UID: "b757a4b3-7f06-4254-b5ca-86f0b0d05234"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.469112 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b757a4b3-7f06-4254-b5ca-86f0b0d05234-kube-api-access-pgfmv" (OuterVolumeSpecName: "kube-api-access-pgfmv") pod "b757a4b3-7f06-4254-b5ca-86f0b0d05234" (UID: "b757a4b3-7f06-4254-b5ca-86f0b0d05234"). InnerVolumeSpecName "kube-api-access-pgfmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.495841 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "b757a4b3-7f06-4254-b5ca-86f0b0d05234" (UID: "b757a4b3-7f06-4254-b5ca-86f0b0d05234"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.497870 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b757a4b3-7f06-4254-b5ca-86f0b0d05234" (UID: "b757a4b3-7f06-4254-b5ca-86f0b0d05234"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.517248 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-config-data" (OuterVolumeSpecName: "config-data") pod "b757a4b3-7f06-4254-b5ca-86f0b0d05234" (UID: "b757a4b3-7f06-4254-b5ca-86f0b0d05234"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.567291 4926 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.567324 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.567336 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgfmv\" (UniqueName: \"kubernetes.io/projected/b757a4b3-7f06-4254-b5ca-86f0b0d05234-kube-api-access-pgfmv\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.567347 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b757a4b3-7f06-4254-b5ca-86f0b0d05234-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.567357 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b757a4b3-7f06-4254-b5ca-86f0b0d05234-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.787758 4926 generic.go:334] "Generic (PLEG): container finished" podID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerID="7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719" exitCode=0 Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.787851 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b757a4b3-7f06-4254-b5ca-86f0b0d05234","Type":"ContainerDied","Data":"7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719"} Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.787888 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"b757a4b3-7f06-4254-b5ca-86f0b0d05234","Type":"ContainerDied","Data":"011c7379c76f8e6b8f6eae7c49453992a73c4f6987df0da8aad7252787686e62"} Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.787913 4926 scope.go:117] "RemoveContainer" containerID="7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.791298 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.845992 4926 generic.go:334] "Generic (PLEG): container finished" podID="9f664100-2926-4e80-a06e-5c09021eb736" containerID="5807d3d885f8cd1c4a345edd91ce53d054005453d20263b419aa19f25f38c7ac" exitCode=1 Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.846293 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerDied","Data":"5807d3d885f8cd1c4a345edd91ce53d054005453d20263b419aa19f25f38c7ac"} Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.847480 4926 scope.go:117] "RemoveContainer" containerID="5807d3d885f8cd1c4a345edd91ce53d054005453d20263b419aa19f25f38c7ac" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.867622 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-stgf4" event={"ID":"434bea04-3768-493f-8d01-36f9c41bc811","Type":"ContainerDied","Data":"bd08fcae9f750343c0ae3eb8c74cc250d0588c9bced80844a18fa4bf4c809509"} Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.867663 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd08fcae9f750343c0ae3eb8c74cc250d0588c9bced80844a18fa4bf4c809509" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.867720 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-stgf4" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.904108 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.917281 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.917329 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.931457 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954211 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:34 crc kubenswrapper[4926]: E1125 18:32:34.954677 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="434bea04-3768-493f-8d01-36f9c41bc811" containerName="neutron-db-sync" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954694 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="434bea04-3768-493f-8d01-36f9c41bc811" containerName="neutron-db-sync" Nov 25 18:32:34 crc kubenswrapper[4926]: E1125 18:32:34.954711 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="init" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954718 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="init" Nov 25 18:32:34 crc kubenswrapper[4926]: E1125 18:32:34.954725 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="dnsmasq-dns" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954731 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="dnsmasq-dns" Nov 25 18:32:34 crc kubenswrapper[4926]: E1125 18:32:34.954787 4926 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954796 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api" Nov 25 18:32:34 crc kubenswrapper[4926]: E1125 18:32:34.954812 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api-log" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954820 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api-log" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.954983 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f8be116-3710-4f26-bde2-348f96675a2c" containerName="dnsmasq-dns" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.955003 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="434bea04-3768-493f-8d01-36f9c41bc811" containerName="neutron-db-sync" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.955017 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api-log" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.955027 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" containerName="watcher-api" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.956006 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.960978 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.967883 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.971743 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 25 18:32:34 crc kubenswrapper[4926]: I1125 18:32:34.972026 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.095732 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-public-tls-certs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.095847 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.095910 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 
18:32:35.095943 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e85535cf-74ad-4608-9752-f44beb920a02-logs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.096103 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.096158 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcm9p\" (UniqueName: \"kubernetes.io/projected/e85535cf-74ad-4608-9752-f44beb920a02-kube-api-access-wcm9p\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.096205 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-config-data\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.156089 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f9bd866f7-brhxw"] Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.157851 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.178398 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f9bd866f7-brhxw"] Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.193341 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-bbf6864d-nswqp"] Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.194882 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcm9p\" (UniqueName: \"kubernetes.io/projected/e85535cf-74ad-4608-9752-f44beb920a02-kube-api-access-wcm9p\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197816 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-config\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197836 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-swift-storage-0\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197855 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-config-data\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197886 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-public-tls-certs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197917 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197942 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197960 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e85535cf-74ad-4608-9752-f44beb920a02-logs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.197987 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-svc\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.198007 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzf46\" (UniqueName: \"kubernetes.io/projected/dfb35090-4520-4bdb-bb35-641859591713-kube-api-access-rzf46\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.198041 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.198057 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-nb\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.204237 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.206339 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.206530 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2sclw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.206633 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.206721 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.207393 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e85535cf-74ad-4608-9752-f44beb920a02-logs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.207783 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.208032 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 
crc kubenswrapper[4926]: I1125 18:32:35.216726 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-public-tls-certs\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.219182 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e85535cf-74ad-4608-9752-f44beb920a02-config-data\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.246776 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcm9p\" (UniqueName: \"kubernetes.io/projected/e85535cf-74ad-4608-9752-f44beb920a02-kube-api-access-wcm9p\") pod \"watcher-api-0\" (UID: \"e85535cf-74ad-4608-9752-f44beb920a02\") " pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.247959 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-bbf6864d-nswqp"] Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.299439 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-config\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.299717 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-swift-storage-0\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.299819 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-config\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.299903 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-ovndb-tls-certs\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.299979 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-httpd-config\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.300111 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-svc\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: 
I1125 18:32:35.300195 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzf46\" (UniqueName: \"kubernetes.io/projected/dfb35090-4520-4bdb-bb35-641859591713-kube-api-access-rzf46\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.300279 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-combined-ca-bundle\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.300348 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c92gw\" (UniqueName: \"kubernetes.io/projected/42900e2c-5c74-417f-a60c-6955d8c0fc29-kube-api-access-c92gw\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.300491 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.300569 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-nb\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.301361 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-swift-storage-0\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.301621 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-config\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.302023 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-nb\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.302391 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.302493 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-svc\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.321496 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.326778 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzf46\" (UniqueName: \"kubernetes.io/projected/dfb35090-4520-4bdb-bb35-641859591713-kube-api-access-rzf46\") pod \"dnsmasq-dns-5f9bd866f7-brhxw\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.404800 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-config\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.404901 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-ovndb-tls-certs\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.404937 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-httpd-config\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.405830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-combined-ca-bundle\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.405866 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c92gw\" (UniqueName: \"kubernetes.io/projected/42900e2c-5c74-417f-a60c-6955d8c0fc29-kube-api-access-c92gw\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.409519 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-config\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.410495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-httpd-config\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.411775 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-ovndb-tls-certs\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.413015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-combined-ca-bundle\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.425451 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c92gw\" (UniqueName: \"kubernetes.io/projected/42900e2c-5c74-417f-a60c-6955d8c0fc29-kube-api-access-c92gw\") pod \"neutron-bbf6864d-nswqp\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.612712 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:35 crc kubenswrapper[4926]: I1125 18:32:35.634068 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:32:36 crc kubenswrapper[4926]: I1125 18:32:36.357177 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b757a4b3-7f06-4254-b5ca-86f0b0d05234" path="/var/lib/kubelet/pods/b757a4b3-7f06-4254-b5ca-86f0b0d05234/volumes" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.170024 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.500877 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-87c6cbb57-5vxgs"] Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.502417 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.506002 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.506715 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.517189 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.537987 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-87c6cbb57-5vxgs"] Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.548931 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-config\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.548997 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-public-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.549067 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-combined-ca-bundle\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.549133 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsml2\" (UniqueName: \"kubernetes.io/projected/6e439ef2-db64-41e2-853a-a16e48f1607d-kube-api-access-tsml2\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.549160 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-internal-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.549196 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-httpd-config\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.549213 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-ovndb-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc 
kubenswrapper[4926]: I1125 18:32:37.650841 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-public-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.650968 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-combined-ca-bundle\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.651052 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsml2\" (UniqueName: \"kubernetes.io/projected/6e439ef2-db64-41e2-853a-a16e48f1607d-kube-api-access-tsml2\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.651074 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-internal-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.651099 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-httpd-config\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.651118 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-ovndb-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.651146 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-config\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.660319 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-public-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.661248 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-httpd-config\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.661917 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-combined-ca-bundle\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.662101 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-config\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.664699 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-ovndb-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.667902 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsml2\" (UniqueName: \"kubernetes.io/projected/6e439ef2-db64-41e2-853a-a16e48f1607d-kube-api-access-tsml2\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.696507 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e439ef2-db64-41e2-853a-a16e48f1607d-internal-tls-certs\") pod \"neutron-87c6cbb57-5vxgs\" (UID: \"6e439ef2-db64-41e2-853a-a16e48f1607d\") " pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:37 crc kubenswrapper[4926]: I1125 18:32:37.898319 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.711306 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.711868 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.740065 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.758345 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.918966 4926 generic.go:334] "Generic (PLEG): container finished" podID="017caf97-9f18-49b8-b6e6-597c709e3420" containerID="7b0eb78ad49b0f6308da6f264521dae46391514392d5ea5580ac14a62c0fd47f" exitCode=0 Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.919047 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x8d9g" event={"ID":"017caf97-9f18-49b8-b6e6-597c709e3420","Type":"ContainerDied","Data":"7b0eb78ad49b0f6308da6f264521dae46391514392d5ea5580ac14a62c0fd47f"} Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.919335 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.919382 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.928781 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.928859 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.975733 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:38 crc kubenswrapper[4926]: I1125 18:32:38.977406 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.317279 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.765534 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-58ffdb7978-lnv9j" Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.846745 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-765875bb4b-tr7fm"] Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.926010 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon-log" containerID="cri-o://1cf829482d4e11d816d3102c1a6f63cdade69e841f157c7d4541ecf4b0799142" gracePeriod=30 Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.926136 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" 
containerName="horizon" containerID="cri-o://852a73a980fb903c7c9cee66c146d5af7a6a6f5e72cce623c7519f6abb47b4ff" gracePeriod=30 Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.927548 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:39 crc kubenswrapper[4926]: I1125 18:32:39.927574 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:40 crc kubenswrapper[4926]: I1125 18:32:40.956528 4926 generic.go:334] "Generic (PLEG): container finished" podID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerID="852a73a980fb903c7c9cee66c146d5af7a6a6f5e72cce623c7519f6abb47b4ff" exitCode=0 Nov 25 18:32:40 crc kubenswrapper[4926]: I1125 18:32:40.957471 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-765875bb4b-tr7fm" event={"ID":"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55","Type":"ContainerDied","Data":"852a73a980fb903c7c9cee66c146d5af7a6a6f5e72cce623c7519f6abb47b4ff"} Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.070928 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.071456 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.370102 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.420688 4926 scope.go:117] "RemoveContainer" containerID="007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.519096 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.584655 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-combined-ca-bundle\") pod \"017caf97-9f18-49b8-b6e6-597c709e3420\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.584775 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-db-sync-config-data\") pod \"017caf97-9f18-49b8-b6e6-597c709e3420\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.584871 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr9c7\" (UniqueName: \"kubernetes.io/projected/017caf97-9f18-49b8-b6e6-597c709e3420-kube-api-access-sr9c7\") pod \"017caf97-9f18-49b8-b6e6-597c709e3420\" (UID: \"017caf97-9f18-49b8-b6e6-597c709e3420\") " Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.597949 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "017caf97-9f18-49b8-b6e6-597c709e3420" (UID: "017caf97-9f18-49b8-b6e6-597c709e3420"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.612621 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/017caf97-9f18-49b8-b6e6-597c709e3420-kube-api-access-sr9c7" (OuterVolumeSpecName: "kube-api-access-sr9c7") pod "017caf97-9f18-49b8-b6e6-597c709e3420" (UID: "017caf97-9f18-49b8-b6e6-597c709e3420"). InnerVolumeSpecName "kube-api-access-sr9c7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.631033 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "017caf97-9f18-49b8-b6e6-597c709e3420" (UID: "017caf97-9f18-49b8-b6e6-597c709e3420"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.687939 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.687992 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/017caf97-9f18-49b8-b6e6-597c709e3420-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.688006 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr9c7\" (UniqueName: \"kubernetes.io/projected/017caf97-9f18-49b8-b6e6-597c709e3420-kube-api-access-sr9c7\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.720512 4926 scope.go:117] "RemoveContainer" containerID="7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719" Nov 25 18:32:41 crc kubenswrapper[4926]: E1125 18:32:41.722018 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719\": container with ID starting with 7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719 not found: ID does not exist" containerID="7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.722074 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719"} err="failed to get container status \"7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719\": rpc error: code = NotFound desc = could not find container \"7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719\": container with ID starting with 7293b68aa9c8e64d65f64d5b0cdf4237792deda3e27d6cf655ec00fdb2e68719 not found: ID does not exist" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.722110 4926 scope.go:117] "RemoveContainer" containerID="007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6" Nov 25 18:32:41 crc kubenswrapper[4926]: E1125 18:32:41.722627 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6\": container with ID starting with 
007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6 not found: ID does not exist" containerID="007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.722680 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6"} err="failed to get container status \"007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6\": rpc error: code = NotFound desc = could not find container \"007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6\": container with ID starting with 007dbe498498d368c66111b8ecee27a20cf3ea77719148d369b3bf642009dab6 not found: ID does not exist" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.982076 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x8d9g" Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.982692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x8d9g" event={"ID":"017caf97-9f18-49b8-b6e6-597c709e3420","Type":"ContainerDied","Data":"efd81a38d1e53937f887af6457db861759d9a6a1992037d40de2684ed56bd9f0"} Nov 25 18:32:41 crc kubenswrapper[4926]: I1125 18:32:41.982721 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efd81a38d1e53937f887af6457db861759d9a6a1992037d40de2684ed56bd9f0" Nov 25 18:32:42 crc kubenswrapper[4926]: E1125 18:32:42.037094 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.271116 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f9bd866f7-brhxw"] Nov 25 18:32:42 crc kubenswrapper[4926]: W1125 18:32:42.272830 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfb35090_4520_4bdb_bb35_641859591713.slice/crio-6318a8870ef4ad1ff6226fe7f5236b65fef5cd16af33d7cc3c0832fcc406599b WatchSource:0}: Error finding container 6318a8870ef4ad1ff6226fe7f5236b65fef5cd16af33d7cc3c0832fcc406599b: Status 404 returned error can't find the container with id 6318a8870ef4ad1ff6226fe7f5236b65fef5cd16af33d7cc3c0832fcc406599b Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.278624 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.278801 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.283682 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.412863 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-bbf6864d-nswqp"] Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.469193 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.598280 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-87c6cbb57-5vxgs"] Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 
18:32:42.890427 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7"] Nov 25 18:32:42 crc kubenswrapper[4926]: E1125 18:32:42.891323 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="017caf97-9f18-49b8-b6e6-597c709e3420" containerName="barbican-db-sync" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.891463 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="017caf97-9f18-49b8-b6e6-597c709e3420" containerName="barbican-db-sync" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.892008 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="017caf97-9f18-49b8-b6e6-597c709e3420" containerName="barbican-db-sync" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.893699 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.898252 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.898457 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.898662 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-44z7v" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.910265 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5c446c68b5-c59jz"] Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.911994 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.916862 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.916948 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-combined-ca-bundle\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917054 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-config-data\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917084 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-config-data\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917112 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c84d99-1576-439c-86b0-bc90f22a286f-logs\") pod 
\"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917137 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-config-data-custom\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917196 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrgc2\" (UniqueName: \"kubernetes.io/projected/14c84d99-1576-439c-86b0-bc90f22a286f-kube-api-access-lrgc2\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917219 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lzzd\" (UniqueName: \"kubernetes.io/projected/82906e7a-0022-49ce-8cf9-10366d783d5e-kube-api-access-7lzzd\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917257 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82906e7a-0022-49ce-8cf9-10366d783d5e-logs\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917392 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-combined-ca-bundle\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.917439 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-config-data-custom\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.931580 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5c446c68b5-c59jz"] Nov 25 18:32:42 crc kubenswrapper[4926]: I1125 18:32:42.961786 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7"] Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.035521 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c84d99-1576-439c-86b0-bc90f22a286f-logs\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.035626 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-config-data-custom\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.035786 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrgc2\" (UniqueName: \"kubernetes.io/projected/14c84d99-1576-439c-86b0-bc90f22a286f-kube-api-access-lrgc2\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.035841 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lzzd\" (UniqueName: \"kubernetes.io/projected/82906e7a-0022-49ce-8cf9-10366d783d5e-kube-api-access-7lzzd\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.035910 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82906e7a-0022-49ce-8cf9-10366d783d5e-logs\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.035978 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-combined-ca-bundle\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.036052 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-config-data-custom\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.036144 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-combined-ca-bundle\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.036247 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-config-data\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.036286 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-config-data\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " 
pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.037786 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c84d99-1576-439c-86b0-bc90f22a286f-logs\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.050206 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerStarted","Data":"e312c6c6f1d44513f5d0adc61f403bc8cbec2a417fca038bf738477226633bb3"} Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.050474 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="ceilometer-notification-agent" containerID="cri-o://470e3756a0af50afe7d5c93b51683a49c9eecdbd66fa37c5c6cc96a5a8c73033" gracePeriod=30 Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.051099 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.051534 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="proxy-httpd" containerID="cri-o://e312c6c6f1d44513f5d0adc61f403bc8cbec2a417fca038bf738477226633bb3" gracePeriod=30 Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.051601 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="sg-core" containerID="cri-o://7a921d4be478da69a6cb59d15f69a0232d6a23c2d93d18a19ead11cedde6bee8" gracePeriod=30 Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.067330 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-config-data\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.086185 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-config-data-custom\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.086973 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82906e7a-0022-49ce-8cf9-10366d783d5e-logs\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.090043 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87c6cbb57-5vxgs" event={"ID":"6e439ef2-db64-41e2-853a-a16e48f1607d","Type":"ContainerStarted","Data":"618a34a856b6b9294437cc71cee5cf77da21d677e568c3f7e7a0a2f9477a64ac"} Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.113836 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-combined-ca-bundle\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.116629 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f9bd866f7-brhxw"] Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.116677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" event={"ID":"dfb35090-4520-4bdb-bb35-641859591713","Type":"ContainerStarted","Data":"6318a8870ef4ad1ff6226fe7f5236b65fef5cd16af33d7cc3c0832fcc406599b"} Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.122620 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-combined-ca-bundle\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.145109 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82906e7a-0022-49ce-8cf9-10366d783d5e-config-data\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.158912 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lzzd\" (UniqueName: \"kubernetes.io/projected/82906e7a-0022-49ce-8cf9-10366d783d5e-kube-api-access-7lzzd\") pod \"barbican-keystone-listener-5dc4bc78cd-2jtj7\" (UID: \"82906e7a-0022-49ce-8cf9-10366d783d5e\") " pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.159027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/14c84d99-1576-439c-86b0-bc90f22a286f-config-data-custom\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.163294 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrgc2\" (UniqueName: \"kubernetes.io/projected/14c84d99-1576-439c-86b0-bc90f22a286f-kube-api-access-lrgc2\") pod \"barbican-worker-5c446c68b5-c59jz\" (UID: \"14c84d99-1576-439c-86b0-bc90f22a286f\") " pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.168343 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerStarted","Data":"18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0"} Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.185191 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bbf6864d-nswqp" event={"ID":"42900e2c-5c74-417f-a60c-6955d8c0fc29","Type":"ContainerStarted","Data":"196e76d80ec3b641f7a9a0a71395d9d9605b393fd2e8e49df37197f210372db6"} Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.199412 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" 
event={"ID":"e85535cf-74ad-4608-9752-f44beb920a02","Type":"ContainerStarted","Data":"167549e0c548abbdf888d7a5a28d5a7d7958160bea15808482e6e87c8cf1ce63"} Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.215651 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6fdfcd589c-z2h42"] Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.222791 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.235610 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6fdfcd589c-z2h42"] Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.239267 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.250191 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-85c985f676-t2gwp"] Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.253619 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.259888 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-85c985f676-t2gwp"] Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.261900 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5c446c68b5-c59jz" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.264299 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368337 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6593e75-121b-466f-ae57-9e947296042d-logs\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368421 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-swift-storage-0\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368490 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-svc\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368532 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-combined-ca-bundle\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368572 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stmht\" (UniqueName: 
\"kubernetes.io/projected/817eea09-537e-42fa-9b5d-c012a71342be-kube-api-access-stmht\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368590 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data-custom\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368631 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-sb\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368659 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhvpf\" (UniqueName: \"kubernetes.io/projected/f6593e75-121b-466f-ae57-9e947296042d-kube-api-access-lhvpf\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368690 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-config\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368718 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.368740 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-nb\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471253 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-sb\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhvpf\" (UniqueName: \"kubernetes.io/projected/f6593e75-121b-466f-ae57-9e947296042d-kube-api-access-lhvpf\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471725 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-config\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471755 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471780 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-nb\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471807 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6593e75-121b-466f-ae57-9e947296042d-logs\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471825 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-swift-storage-0\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471877 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-svc\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471910 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-combined-ca-bundle\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471955 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stmht\" (UniqueName: \"kubernetes.io/projected/817eea09-537e-42fa-9b5d-c012a71342be-kube-api-access-stmht\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.471973 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data-custom\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.472196 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-sb\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.472507 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6593e75-121b-466f-ae57-9e947296042d-logs\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.473504 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-svc\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.474098 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-swift-storage-0\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.474638 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-config\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.479982 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-nb\") pod \"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.481966 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-combined-ca-bundle\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.496119 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data-custom\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.508251 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhvpf\" (UniqueName: \"kubernetes.io/projected/f6593e75-121b-466f-ae57-9e947296042d-kube-api-access-lhvpf\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.511950 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stmht\" (UniqueName: \"kubernetes.io/projected/817eea09-537e-42fa-9b5d-c012a71342be-kube-api-access-stmht\") pod 
\"dnsmasq-dns-6fdfcd589c-z2h42\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.513030 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data\") pod \"barbican-api-85c985f676-t2gwp\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.589199 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.607250 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:43 crc kubenswrapper[4926]: I1125 18:32:43.849177 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7"] Nov 25 18:32:43 crc kubenswrapper[4926]: W1125 18:32:43.928547 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82906e7a_0022_49ce_8cf9_10366d783d5e.slice/crio-effbffc3fb8af192fd79f4f9da259098898567ef593319ac969ba5206a16fe7d WatchSource:0}: Error finding container effbffc3fb8af192fd79f4f9da259098898567ef593319ac969ba5206a16fe7d: Status 404 returned error can't find the container with id effbffc3fb8af192fd79f4f9da259098898567ef593319ac969ba5206a16fe7d Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.154076 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5c446c68b5-c59jz"] Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.249917 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" event={"ID":"dfb35090-4520-4bdb-bb35-641859591713","Type":"ContainerStarted","Data":"4bb438bb6399666a1f248509e9779d29c7270bb04095d06e2078b5886e98af8b"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.250145 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" podUID="dfb35090-4520-4bdb-bb35-641859591713" containerName="init" containerID="cri-o://4bb438bb6399666a1f248509e9779d29c7270bb04095d06e2078b5886e98af8b" gracePeriod=10 Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.267774 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bbf6864d-nswqp" event={"ID":"42900e2c-5c74-417f-a60c-6955d8c0fc29","Type":"ContainerStarted","Data":"2ad57f255b3a7c599d030c35520841e08c4cce4188725f9f8b75179bd1e32703"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.270767 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5c446c68b5-c59jz" event={"ID":"14c84d99-1576-439c-86b0-bc90f22a286f","Type":"ContainerStarted","Data":"64e2b1044e708652990a2bfbdc14d7c9421817a3ccb96eabd1352051f0444100"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.281535 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e85535cf-74ad-4608-9752-f44beb920a02","Type":"ContainerStarted","Data":"e3733f8ddbde327c3055482f338e090bae9f5eb13f17d9add04e12d8c89d0dca"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.290735 4926 generic.go:334] "Generic (PLEG): container finished" podID="102b4780-5da7-4b86-9679-e87417b4ee5a" 
containerID="e312c6c6f1d44513f5d0adc61f403bc8cbec2a417fca038bf738477226633bb3" exitCode=0 Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.290769 4926 generic.go:334] "Generic (PLEG): container finished" podID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerID="7a921d4be478da69a6cb59d15f69a0232d6a23c2d93d18a19ead11cedde6bee8" exitCode=2 Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.290819 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerDied","Data":"e312c6c6f1d44513f5d0adc61f403bc8cbec2a417fca038bf738477226633bb3"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.290849 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerDied","Data":"7a921d4be478da69a6cb59d15f69a0232d6a23c2d93d18a19ead11cedde6bee8"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.312983 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87c6cbb57-5vxgs" event={"ID":"6e439ef2-db64-41e2-853a-a16e48f1607d","Type":"ContainerStarted","Data":"a65a1a60b7696863e577f1c3668069c9aebca1940e73236a675da087c10d2744"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.321684 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" event={"ID":"82906e7a-0022-49ce-8cf9-10366d783d5e","Type":"ContainerStarted","Data":"effbffc3fb8af192fd79f4f9da259098898567ef593319ac969ba5206a16fe7d"} Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.411199 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6fdfcd589c-z2h42"] Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.535496 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-85c985f676-t2gwp"] Nov 25 18:32:44 crc kubenswrapper[4926]: W1125 18:32:44.572780 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6593e75_121b_466f_ae57_9e947296042d.slice/crio-77977abe8ca73b8e46d099784e263643fa4907bf86948d5ae68447272805073f WatchSource:0}: Error finding container 77977abe8ca73b8e46d099784e263643fa4907bf86948d5ae68447272805073f: Status 404 returned error can't find the container with id 77977abe8ca73b8e46d099784e263643fa4907bf86948d5ae68447272805073f Nov 25 18:32:44 crc kubenswrapper[4926]: I1125 18:32:44.914766 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.010798 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.318117 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.161:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.161:8443: connect: connection refused" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.351674 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e85535cf-74ad-4608-9752-f44beb920a02","Type":"ContainerStarted","Data":"8350043fadb7e598e7e181c222efd1f6623e4b26b064ab1c24d34da7708cbc20"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 
18:32:45.374982 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87c6cbb57-5vxgs" event={"ID":"6e439ef2-db64-41e2-853a-a16e48f1607d","Type":"ContainerStarted","Data":"433ac5ffb94d16b1c56474442644668192e7afb3b1110cda0c80b39ddb5f65bb"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.381395 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" event={"ID":"817eea09-537e-42fa-9b5d-c012a71342be","Type":"ContainerStarted","Data":"5d45d1b59cf12a284abe1496b5678e99989dbdcff60abb9fe3545033a6cbf414"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.381463 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" event={"ID":"817eea09-537e-42fa-9b5d-c012a71342be","Type":"ContainerStarted","Data":"5a9d2ea2ae920c6d25f3df0ba3f8455a04524c8bb62a9b84b80b7ba34ceba137"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.386748 4926 generic.go:334] "Generic (PLEG): container finished" podID="dfb35090-4520-4bdb-bb35-641859591713" containerID="4bb438bb6399666a1f248509e9779d29c7270bb04095d06e2078b5886e98af8b" exitCode=0 Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.386882 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" event={"ID":"dfb35090-4520-4bdb-bb35-641859591713","Type":"ContainerDied","Data":"4bb438bb6399666a1f248509e9779d29c7270bb04095d06e2078b5886e98af8b"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.390137 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c985f676-t2gwp" event={"ID":"f6593e75-121b-466f-ae57-9e947296042d","Type":"ContainerStarted","Data":"ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.390177 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c985f676-t2gwp" event={"ID":"f6593e75-121b-466f-ae57-9e947296042d","Type":"ContainerStarted","Data":"77977abe8ca73b8e46d099784e263643fa4907bf86948d5ae68447272805073f"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.445066 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bbf6864d-nswqp" event={"ID":"42900e2c-5c74-417f-a60c-6955d8c0fc29","Type":"ContainerStarted","Data":"745ee863d21e18e961f2f65c719dab8530a6229a44af738fc91abef8cf8241d7"} Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.446150 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.527793 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.699957 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.742053 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-swift-storage-0\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.742143 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-nb\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.742231 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.742264 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-svc\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.742363 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzf46\" (UniqueName: \"kubernetes.io/projected/dfb35090-4520-4bdb-bb35-641859591713-kube-api-access-rzf46\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.742458 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-config\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.794796 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfb35090-4520-4bdb-bb35-641859591713-kube-api-access-rzf46" (OuterVolumeSpecName: "kube-api-access-rzf46") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "kube-api-access-rzf46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.845774 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzf46\" (UniqueName: \"kubernetes.io/projected/dfb35090-4520-4bdb-bb35-641859591713-kube-api-access-rzf46\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.887320 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.902123 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.944316 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.956051 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.988079 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb\") pod \"dfb35090-4520-4bdb-bb35-641859591713\" (UID: \"dfb35090-4520-4bdb-bb35-641859591713\") " Nov 25 18:32:45 crc kubenswrapper[4926]: W1125 18:32:45.989688 4926 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/dfb35090-4520-4bdb-bb35-641859591713/volumes/kubernetes.io~configmap/ovsdbserver-sb Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.990847 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.992066 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.992144 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.992287 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.992357 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:45 crc kubenswrapper[4926]: I1125 18:32:45.995108 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-config" (OuterVolumeSpecName: "config") pod "dfb35090-4520-4bdb-bb35-641859591713" (UID: "dfb35090-4520-4bdb-bb35-641859591713"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.097223 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfb35090-4520-4bdb-bb35-641859591713-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.460531 4926 generic.go:334] "Generic (PLEG): container finished" podID="817eea09-537e-42fa-9b5d-c012a71342be" containerID="5d45d1b59cf12a284abe1496b5678e99989dbdcff60abb9fe3545033a6cbf414" exitCode=0 Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.460594 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" event={"ID":"817eea09-537e-42fa-9b5d-c012a71342be","Type":"ContainerDied","Data":"5d45d1b59cf12a284abe1496b5678e99989dbdcff60abb9fe3545033a6cbf414"} Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.502739 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" event={"ID":"dfb35090-4520-4bdb-bb35-641859591713","Type":"ContainerDied","Data":"6318a8870ef4ad1ff6226fe7f5236b65fef5cd16af33d7cc3c0832fcc406599b"} Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.502811 4926 scope.go:117] "RemoveContainer" containerID="4bb438bb6399666a1f248509e9779d29c7270bb04095d06e2078b5886e98af8b" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.502773 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f9bd866f7-brhxw" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.562576 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-bbf6864d-nswqp" podStartSLOduration=11.562540827 podStartE2EDuration="11.562540827s" podCreationTimestamp="2025-11-25 18:32:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:46.542498188 +0000 UTC m=+1196.928011793" watchObservedRunningTime="2025-11-25 18:32:46.562540827 +0000 UTC m=+1196.948054532" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.570820 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=12.570797087 podStartE2EDuration="12.570797087s" podCreationTimestamp="2025-11-25 18:32:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:46.563672017 +0000 UTC m=+1196.949185622" watchObservedRunningTime="2025-11-25 18:32:46.570797087 +0000 UTC m=+1196.956310702" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.615284 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-87c6cbb57-5vxgs" podStartSLOduration=9.615261707 podStartE2EDuration="9.615261707s" podCreationTimestamp="2025-11-25 18:32:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:46.61302881 +0000 UTC m=+1196.998542425" watchObservedRunningTime="2025-11-25 18:32:46.615261707 +0000 UTC m=+1197.000775312" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.700596 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f9bd866f7-brhxw"] Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.716984 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f9bd866f7-brhxw"] Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.888154 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5f9c5dcd5b-mpcsb"] Nov 25 18:32:46 crc kubenswrapper[4926]: E1125 18:32:46.897947 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfb35090-4520-4bdb-bb35-641859591713" containerName="init" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.898003 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfb35090-4520-4bdb-bb35-641859591713" containerName="init" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.898443 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfb35090-4520-4bdb-bb35-641859591713" containerName="init" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.899816 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.903879 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.904080 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 18:32:46 crc kubenswrapper[4926]: I1125 18:32:46.931764 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f9c5dcd5b-mpcsb"] Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043135 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-public-tls-certs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043407 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-config-data-custom\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043542 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07453d33-7ce0-41da-bfbe-f496f1035621-logs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043619 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znt5s\" (UniqueName: \"kubernetes.io/projected/07453d33-7ce0-41da-bfbe-f496f1035621-kube-api-access-znt5s\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043704 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-combined-ca-bundle\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043802 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-internal-tls-certs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.043916 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-config-data\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146075 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-public-tls-certs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146142 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-config-data-custom\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146181 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07453d33-7ce0-41da-bfbe-f496f1035621-logs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146210 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znt5s\" (UniqueName: \"kubernetes.io/projected/07453d33-7ce0-41da-bfbe-f496f1035621-kube-api-access-znt5s\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146262 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-combined-ca-bundle\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146291 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-internal-tls-certs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.146362 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-config-data\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.147200 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07453d33-7ce0-41da-bfbe-f496f1035621-logs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.153083 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-internal-tls-certs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.153232 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-config-data-custom\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.153535 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-combined-ca-bundle\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.154215 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-config-data\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.159117 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07453d33-7ce0-41da-bfbe-f496f1035621-public-tls-certs\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.161977 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znt5s\" (UniqueName: \"kubernetes.io/projected/07453d33-7ce0-41da-bfbe-f496f1035621-kube-api-access-znt5s\") pod \"barbican-api-5f9c5dcd5b-mpcsb\" (UID: \"07453d33-7ce0-41da-bfbe-f496f1035621\") " pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.246159 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.516588 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c985f676-t2gwp" event={"ID":"f6593e75-121b-466f-ae57-9e947296042d","Type":"ContainerStarted","Data":"6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59"} Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.516727 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.518533 4926 generic.go:334] "Generic (PLEG): container finished" podID="166aef77-b73a-497a-886d-a66a548bff2d" containerID="ffe20d0efc9a99274bf3de7e22f73e790bad18f6d2bf38dcedd11ca992f5dd59" exitCode=137 Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.518555 4926 generic.go:334] "Generic (PLEG): container finished" podID="166aef77-b73a-497a-886d-a66a548bff2d" containerID="a63627d9165dbcc99c0946b1b85b68d0edd0b9c9870131543642a2ed93d7f691" exitCode=137 Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.518555 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd689864c-5qb7v" event={"ID":"166aef77-b73a-497a-886d-a66a548bff2d","Type":"ContainerDied","Data":"ffe20d0efc9a99274bf3de7e22f73e790bad18f6d2bf38dcedd11ca992f5dd59"} Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.518589 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd689864c-5qb7v" event={"ID":"166aef77-b73a-497a-886d-a66a548bff2d","Type":"ContainerDied","Data":"a63627d9165dbcc99c0946b1b85b68d0edd0b9c9870131543642a2ed93d7f691"} Nov 25 18:32:47 crc kubenswrapper[4926]: I1125 18:32:47.536075 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-85c985f676-t2gwp" podStartSLOduration=4.536055001 podStartE2EDuration="4.536055001s" podCreationTimestamp="2025-11-25 18:32:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:47.530543321 +0000 UTC m=+1197.916056926" watchObservedRunningTime="2025-11-25 18:32:47.536055001 +0000 UTC m=+1197.921568606" Nov 25 18:32:48 crc kubenswrapper[4926]: I1125 18:32:48.343760 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfb35090-4520-4bdb-bb35-641859591713" path="/var/lib/kubelet/pods/dfb35090-4520-4bdb-bb35-641859591713/volumes" Nov 25 18:32:48 crc kubenswrapper[4926]: I1125 18:32:48.577020 4926 generic.go:334] "Generic (PLEG): container finished" podID="9f664100-2926-4e80-a06e-5c09021eb736" containerID="18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0" exitCode=1 Nov 25 18:32:48 crc kubenswrapper[4926]: I1125 18:32:48.577120 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerDied","Data":"18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0"} Nov 25 18:32:48 crc kubenswrapper[4926]: I1125 18:32:48.577741 4926 scope.go:117] "RemoveContainer" containerID="18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0" Nov 25 18:32:48 crc kubenswrapper[4926]: I1125 18:32:48.578556 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:48 crc kubenswrapper[4926]: E1125 18:32:48.580130 4926 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(9f664100-2926-4e80-a06e-5c09021eb736)\"" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.318517 4926 scope.go:117] "RemoveContainer" containerID="5807d3d885f8cd1c4a345edd91ce53d054005453d20263b419aa19f25f38c7ac" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.398802 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.493009 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-config-data\") pod \"166aef77-b73a-497a-886d-a66a548bff2d\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.493422 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-scripts\") pod \"166aef77-b73a-497a-886d-a66a548bff2d\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.493542 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166aef77-b73a-497a-886d-a66a548bff2d-logs\") pod \"166aef77-b73a-497a-886d-a66a548bff2d\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.493561 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn4gq\" (UniqueName: \"kubernetes.io/projected/166aef77-b73a-497a-886d-a66a548bff2d-kube-api-access-fn4gq\") pod \"166aef77-b73a-497a-886d-a66a548bff2d\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.493625 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/166aef77-b73a-497a-886d-a66a548bff2d-horizon-secret-key\") pod \"166aef77-b73a-497a-886d-a66a548bff2d\" (UID: \"166aef77-b73a-497a-886d-a66a548bff2d\") " Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.496706 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/166aef77-b73a-497a-886d-a66a548bff2d-logs" (OuterVolumeSpecName: "logs") pod "166aef77-b73a-497a-886d-a66a548bff2d" (UID: "166aef77-b73a-497a-886d-a66a548bff2d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.508507 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/166aef77-b73a-497a-886d-a66a548bff2d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "166aef77-b73a-497a-886d-a66a548bff2d" (UID: "166aef77-b73a-497a-886d-a66a548bff2d"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.511620 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/166aef77-b73a-497a-886d-a66a548bff2d-kube-api-access-fn4gq" (OuterVolumeSpecName: "kube-api-access-fn4gq") pod "166aef77-b73a-497a-886d-a66a548bff2d" (UID: "166aef77-b73a-497a-886d-a66a548bff2d"). InnerVolumeSpecName "kube-api-access-fn4gq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.535028 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-scripts" (OuterVolumeSpecName: "scripts") pod "166aef77-b73a-497a-886d-a66a548bff2d" (UID: "166aef77-b73a-497a-886d-a66a548bff2d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.537909 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-config-data" (OuterVolumeSpecName: "config-data") pod "166aef77-b73a-497a-886d-a66a548bff2d" (UID: "166aef77-b73a-497a-886d-a66a548bff2d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.596847 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.596882 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/166aef77-b73a-497a-886d-a66a548bff2d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.596891 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166aef77-b73a-497a-886d-a66a548bff2d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.596900 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn4gq\" (UniqueName: \"kubernetes.io/projected/166aef77-b73a-497a-886d-a66a548bff2d-kube-api-access-fn4gq\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.596911 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/166aef77-b73a-497a-886d-a66a548bff2d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.661045 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd689864c-5qb7v" event={"ID":"166aef77-b73a-497a-886d-a66a548bff2d","Type":"ContainerDied","Data":"a7a0505f647f70070835b00f050a718e0a06aac972d5d775fb30bf26d4938645"} Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.661137 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7fd689864c-5qb7v" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.707760 4926 generic.go:334] "Generic (PLEG): container finished" podID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerID="470e3756a0af50afe7d5c93b51683a49c9eecdbd66fa37c5c6cc96a5a8c73033" exitCode=0 Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.708235 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerDied","Data":"470e3756a0af50afe7d5c93b51683a49c9eecdbd66fa37c5c6cc96a5a8c73033"} Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.733449 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7fd689864c-5qb7v"] Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.736849 4926 scope.go:117] "RemoveContainer" containerID="ffe20d0efc9a99274bf3de7e22f73e790bad18f6d2bf38dcedd11ca992f5dd59" Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.743121 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7fd689864c-5qb7v"] Nov 25 18:32:49 crc kubenswrapper[4926]: I1125 18:32:49.985088 4926 scope.go:117] "RemoveContainer" containerID="a63627d9165dbcc99c0946b1b85b68d0edd0b9c9870131543642a2ed93d7f691" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.023859 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.114733 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-sg-core-conf-yaml\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.115085 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-config-data\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.115109 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-combined-ca-bundle\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.115204 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-log-httpd\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.115266 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gw92\" (UniqueName: \"kubernetes.io/projected/102b4780-5da7-4b86-9679-e87417b4ee5a-kube-api-access-9gw92\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.115306 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-run-httpd\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: 
\"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.115330 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-scripts\") pod \"102b4780-5da7-4b86-9679-e87417b4ee5a\" (UID: \"102b4780-5da7-4b86-9679-e87417b4ee5a\") " Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.116164 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.116568 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.120922 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/102b4780-5da7-4b86-9679-e87417b4ee5a-kube-api-access-9gw92" (OuterVolumeSpecName: "kube-api-access-9gw92") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "kube-api-access-9gw92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.121085 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-scripts" (OuterVolumeSpecName: "scripts") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.142788 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.183042 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f9c5dcd5b-mpcsb"] Nov 25 18:32:50 crc kubenswrapper[4926]: W1125 18:32:50.188821 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07453d33_7ce0_41da_bfbe_f496f1035621.slice/crio-4f5def68d0f0d97256222640755010841cec43867b4f55d3e18b71eeaa1c8e7b WatchSource:0}: Error finding container 4f5def68d0f0d97256222640755010841cec43867b4f55d3e18b71eeaa1c8e7b: Status 404 returned error can't find the container with id 4f5def68d0f0d97256222640755010841cec43867b4f55d3e18b71eeaa1c8e7b Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.192206 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.220732 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.220760 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.220772 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.220782 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.220791 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gw92\" (UniqueName: \"kubernetes.io/projected/102b4780-5da7-4b86-9679-e87417b4ee5a-kube-api-access-9gw92\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.220800 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/102b4780-5da7-4b86-9679-e87417b4ee5a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.230509 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-config-data" (OuterVolumeSpecName: "config-data") pod "102b4780-5da7-4b86-9679-e87417b4ee5a" (UID: "102b4780-5da7-4b86-9679-e87417b4ee5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.322196 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b4780-5da7-4b86-9679-e87417b4ee5a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.322946 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.326519 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.346623 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="166aef77-b73a-497a-886d-a66a548bff2d" path="/var/lib/kubelet/pods/166aef77-b73a-497a-886d-a66a548bff2d/volumes" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.736206 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" event={"ID":"817eea09-537e-42fa-9b5d-c012a71342be","Type":"ContainerStarted","Data":"fb039c7ded5fd5b219eb7a400d1eb1df757ec2eefd0efbae3ad3ddc65165fca6"} Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.737566 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.765726 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" podStartSLOduration=7.765708973 podStartE2EDuration="7.765708973s" podCreationTimestamp="2025-11-25 18:32:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:50.762161882 +0000 UTC m=+1201.147675487" watchObservedRunningTime="2025-11-25 18:32:50.765708973 +0000 UTC m=+1201.151222578" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.766641 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5c446c68b5-c59jz" event={"ID":"14c84d99-1576-439c-86b0-bc90f22a286f","Type":"ContainerStarted","Data":"17b3e042e18bc46251b1e782a093596eb438d115b7a4d8f5de128e7be29ec4bf"} Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.778473 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.778355 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"102b4780-5da7-4b86-9679-e87417b4ee5a","Type":"ContainerDied","Data":"8ad478716dbcd108968d441e8c96875dd7521bfb7415051754ed48bbfa0b2aad"} Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.778554 4926 scope.go:117] "RemoveContainer" containerID="e312c6c6f1d44513f5d0adc61f403bc8cbec2a417fca038bf738477226633bb3" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.786300 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" event={"ID":"07453d33-7ce0-41da-bfbe-f496f1035621","Type":"ContainerStarted","Data":"7467847b43011edd6f50ef3044d4cb92a52d2b4299a94ad76e16799b91c06e7f"} Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.786340 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" event={"ID":"07453d33-7ce0-41da-bfbe-f496f1035621","Type":"ContainerStarted","Data":"4f5def68d0f0d97256222640755010841cec43867b4f55d3e18b71eeaa1c8e7b"} Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.787602 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.788702 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" event={"ID":"82906e7a-0022-49ce-8cf9-10366d783d5e","Type":"ContainerStarted","Data":"08c5d5dfc810b7cef65143ca8a31a72b3aa3989410ed7e3a227cf5cd98a84c8c"} Nov 25 18:32:50 crc kubenswrapper[4926]: E1125 18:32:50.803856 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod102b4780_5da7_4b86_9679_e87417b4ee5a.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod102b4780_5da7_4b86_9679_e87417b4ee5a.slice/crio-8ad478716dbcd108968d441e8c96875dd7521bfb7415051754ed48bbfa0b2aad\": RecentStats: unable to find data in memory cache]" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.856453 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.864436 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.878282 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:32:50 crc kubenswrapper[4926]: E1125 18:32:50.878855 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="ceilometer-notification-agent" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.878875 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="ceilometer-notification-agent" Nov 25 18:32:50 crc kubenswrapper[4926]: E1125 18:32:50.878901 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="proxy-httpd" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.878909 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="proxy-httpd" Nov 25 18:32:50 crc kubenswrapper[4926]: E1125 18:32:50.878929 4926 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.878938 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon" Nov 25 18:32:50 crc kubenswrapper[4926]: E1125 18:32:50.878956 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="sg-core" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.878963 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="sg-core" Nov 25 18:32:50 crc kubenswrapper[4926]: E1125 18:32:50.878974 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon-log" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.878984 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon-log" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.884685 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="proxy-httpd" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.884746 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon-log" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.884761 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="ceilometer-notification-agent" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.884785 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="166aef77-b73a-497a-886d-a66a548bff2d" containerName="horizon" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.884798 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" containerName="sg-core" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.887524 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.887672 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.890650 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.897849 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.935794 4926 scope.go:117] "RemoveContainer" containerID="7a921d4be478da69a6cb59d15f69a0232d6a23c2d93d18a19ead11cedde6bee8" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.951939 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r2m9\" (UniqueName: \"kubernetes.io/projected/2add9e5e-d863-4ecc-9778-c932b0532956-kube-api-access-2r2m9\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.952167 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.952322 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-log-httpd\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.952419 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-config-data\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.952521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-scripts\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.952604 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.952681 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-run-httpd\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:50 crc kubenswrapper[4926]: I1125 18:32:50.960572 4926 scope.go:117] "RemoveContainer" containerID="470e3756a0af50afe7d5c93b51683a49c9eecdbd66fa37c5c6cc96a5a8c73033" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055095 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r2m9\" (UniqueName: 
\"kubernetes.io/projected/2add9e5e-d863-4ecc-9778-c932b0532956-kube-api-access-2r2m9\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055412 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055535 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-log-httpd\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055618 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-config-data\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055695 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-scripts\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055761 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.055949 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-run-httpd\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.056345 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-run-httpd\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.056460 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-log-httpd\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.074402 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-scripts\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.074518 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.074879 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.074913 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-config-data\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.078519 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r2m9\" (UniqueName: \"kubernetes.io/projected/2add9e5e-d863-4ecc-9778-c932b0532956-kube-api-access-2r2m9\") pod \"ceilometer-0\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.213638 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.741126 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.800823 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerStarted","Data":"7d649be1828ec9e5a8776f82b2dbc722bca5b2f00d5e4f4c83a501154fcb736b"} Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.803843 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" event={"ID":"07453d33-7ce0-41da-bfbe-f496f1035621","Type":"ContainerStarted","Data":"fb50cdf8224e7bd13f86286ac2bbf207a944e21317368c0cc7d5dda8c4a5ab2b"} Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.804098 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.804127 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.806490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" event={"ID":"82906e7a-0022-49ce-8cf9-10366d783d5e","Type":"ContainerStarted","Data":"290f827681e911f9c89cc849d7815dfdeb15da57ead4917857b8f2de100b0d94"} Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.808736 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5c446c68b5-c59jz" event={"ID":"14c84d99-1576-439c-86b0-bc90f22a286f","Type":"ContainerStarted","Data":"4847cde49629394d04b76dc554ec65995ad09823d2a7c4c6a6e25ced210df47c"} Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.853828 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" podStartSLOduration=5.853807286 podStartE2EDuration="5.853807286s" podCreationTimestamp="2025-11-25 18:32:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:51.82958072 +0000 UTC m=+1202.215094325" watchObservedRunningTime="2025-11-25 18:32:51.853807286 +0000 UTC m=+1202.239320901" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.860571 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5c446c68b5-c59jz" podStartSLOduration=4.534897925 podStartE2EDuration="9.860553207s" podCreationTimestamp="2025-11-25 18:32:42 +0000 UTC" firstStartedPulling="2025-11-25 18:32:44.14662106 +0000 UTC m=+1194.532134655" lastFinishedPulling="2025-11-25 18:32:49.472276342 +0000 UTC m=+1199.857789937" observedRunningTime="2025-11-25 18:32:51.848575293 +0000 UTC m=+1202.234088898" watchObservedRunningTime="2025-11-25 18:32:51.860553207 +0000 UTC m=+1202.246066812" Nov 25 18:32:51 crc kubenswrapper[4926]: I1125 18:32:51.884863 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5dc4bc78cd-2jtj7" podStartSLOduration=4.271959044 podStartE2EDuration="9.884842004s" podCreationTimestamp="2025-11-25 18:32:42 +0000 UTC" firstStartedPulling="2025-11-25 18:32:43.94550965 +0000 UTC m=+1194.331023255" lastFinishedPulling="2025-11-25 18:32:49.55839261 +0000 UTC m=+1199.943906215" observedRunningTime="2025-11-25 18:32:51.873095426 +0000 UTC m=+1202.258609041" watchObservedRunningTime="2025-11-25 18:32:51.884842004 +0000 UTC m=+1202.270355609" Nov 25 18:32:52 crc kubenswrapper[4926]: I1125 18:32:52.339899 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="102b4780-5da7-4b86-9679-e87417b4ee5a" path="/var/lib/kubelet/pods/102b4780-5da7-4b86-9679-e87417b4ee5a/volumes" Nov 25 18:32:52 crc kubenswrapper[4926]: I1125 18:32:52.572731 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 25 18:32:52 crc kubenswrapper[4926]: I1125 18:32:52.844426 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerStarted","Data":"4d98227633c8f09c886dfd83d431321cc347e53dba2bfaead5bb3c5e38436249"} Nov 25 18:32:52 crc kubenswrapper[4926]: I1125 18:32:52.848714 4926 generic.go:334] "Generic (PLEG): container finished" podID="0138f286-e018-42de-b145-2cda09144394" containerID="520ea0e9d8dd3748068d6b0c4b1f21c3312cb88dd7caa9da843d0bd092df244a" exitCode=0 Nov 25 18:32:52 crc kubenswrapper[4926]: I1125 18:32:52.848908 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jnbt6" event={"ID":"0138f286-e018-42de-b145-2cda09144394","Type":"ContainerDied","Data":"520ea0e9d8dd3748068d6b0c4b1f21c3312cb88dd7caa9da843d0bd092df244a"} Nov 25 18:32:53 crc kubenswrapper[4926]: I1125 18:32:53.860897 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerStarted","Data":"e3bb368b566e83dfbfef46792d7aa559564869d6ef4474b4dd80f5cd916d46e4"} Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.335858 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.425603 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpc52\" (UniqueName: \"kubernetes.io/projected/0138f286-e018-42de-b145-2cda09144394-kube-api-access-lpc52\") pod \"0138f286-e018-42de-b145-2cda09144394\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.425755 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-config-data\") pod \"0138f286-e018-42de-b145-2cda09144394\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.425836 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-combined-ca-bundle\") pod \"0138f286-e018-42de-b145-2cda09144394\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.425918 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-db-sync-config-data\") pod \"0138f286-e018-42de-b145-2cda09144394\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.425989 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-scripts\") pod \"0138f286-e018-42de-b145-2cda09144394\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.426171 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0138f286-e018-42de-b145-2cda09144394-etc-machine-id\") pod \"0138f286-e018-42de-b145-2cda09144394\" (UID: \"0138f286-e018-42de-b145-2cda09144394\") " Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.427747 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0138f286-e018-42de-b145-2cda09144394-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0138f286-e018-42de-b145-2cda09144394" (UID: "0138f286-e018-42de-b145-2cda09144394"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.430710 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0138f286-e018-42de-b145-2cda09144394-kube-api-access-lpc52" (OuterVolumeSpecName: "kube-api-access-lpc52") pod "0138f286-e018-42de-b145-2cda09144394" (UID: "0138f286-e018-42de-b145-2cda09144394"). InnerVolumeSpecName "kube-api-access-lpc52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.434594 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0138f286-e018-42de-b145-2cda09144394" (UID: "0138f286-e018-42de-b145-2cda09144394"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.434678 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-scripts" (OuterVolumeSpecName: "scripts") pod "0138f286-e018-42de-b145-2cda09144394" (UID: "0138f286-e018-42de-b145-2cda09144394"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.475456 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0138f286-e018-42de-b145-2cda09144394" (UID: "0138f286-e018-42de-b145-2cda09144394"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.499018 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-config-data" (OuterVolumeSpecName: "config-data") pod "0138f286-e018-42de-b145-2cda09144394" (UID: "0138f286-e018-42de-b145-2cda09144394"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.528419 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpc52\" (UniqueName: \"kubernetes.io/projected/0138f286-e018-42de-b145-2cda09144394-kube-api-access-lpc52\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.528454 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.528464 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.528472 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.528480 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0138f286-e018-42de-b145-2cda09144394-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.528488 4926 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0138f286-e018-42de-b145-2cda09144394-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.891019 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerStarted","Data":"325a126e5dc054dc07e98e78ed06dbe83cdff707ac49c258c75e5df380fc7918"} Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.896055 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jnbt6" event={"ID":"0138f286-e018-42de-b145-2cda09144394","Type":"ContainerDied","Data":"b8fc94354958fe153cb45b06179b3d522a7b0e0fe5540293447b458e2afcd26c"} 
Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.896236 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8fc94354958fe153cb45b06179b3d522a7b0e0fe5540293447b458e2afcd26c" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.896170 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jnbt6" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.914236 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.914276 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.914287 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:32:54 crc kubenswrapper[4926]: I1125 18:32:54.915041 4926 scope.go:117] "RemoveContainer" containerID="18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0" Nov 25 18:32:54 crc kubenswrapper[4926]: E1125 18:32:54.915309 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(9f664100-2926-4e80-a06e-5c09021eb736)\"" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.064577 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.168971 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.286812 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:32:55 crc kubenswrapper[4926]: E1125 18:32:55.287826 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0138f286-e018-42de-b145-2cda09144394" containerName="cinder-db-sync" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.287862 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0138f286-e018-42de-b145-2cda09144394" containerName="cinder-db-sync" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.288156 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0138f286-e018-42de-b145-2cda09144394" containerName="cinder-db-sync" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.289331 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.299527 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.299725 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.299892 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-28tsb" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.300023 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.320361 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.322564 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.161:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.161:8443: connect: connection refused" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.323488 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.354826 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.354970 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.355023 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrml4\" (UniqueName: \"kubernetes.io/projected/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-kube-api-access-mrml4\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.355074 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.355091 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-scripts\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.355123 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.365164 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6fdfcd589c-z2h42"] Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.366654 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.366813 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" podUID="817eea09-537e-42fa-9b5d-c012a71342be" containerName="dnsmasq-dns" containerID="cri-o://fb039c7ded5fd5b219eb7a400d1eb1df757ec2eefd0efbae3ad3ddc65165fca6" gracePeriod=10 Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.397687 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.401253 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8cdb477f-d95t9"] Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.402919 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.431544 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8cdb477f-d95t9"] Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458662 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458703 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-scripts\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458729 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-svc\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458762 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-nb\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458785 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 
18:32:55.458813 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458835 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-config\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458912 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-sb\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458940 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458975 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-swift-storage-0\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.458994 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrml4\" (UniqueName: \"kubernetes.io/projected/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-kube-api-access-mrml4\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.459024 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwpk2\" (UniqueName: \"kubernetes.io/projected/c59068f7-7f7a-4de3-b312-9f40bfd46128-kube-api-access-lwpk2\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.459117 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.468132 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-scripts\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.469458 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.480360 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.495518 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.496220 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrml4\" (UniqueName: \"kubernetes.io/projected/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-kube-api-access-mrml4\") pod \"cinder-scheduler-0\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.526451 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.528418 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.532636 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.549009 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.566924 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-config\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567120 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567205 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567255 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-scripts\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567291 4926 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-sb\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567324 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-logs\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567437 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dddwh\" (UniqueName: \"kubernetes.io/projected/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-kube-api-access-dddwh\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567470 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-swift-storage-0\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567551 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwpk2\" (UniqueName: \"kubernetes.io/projected/c59068f7-7f7a-4de3-b312-9f40bfd46128-kube-api-access-lwpk2\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567621 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-svc\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-nb\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567727 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data-custom\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.567753 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.569011 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-config\") pod 
\"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.569989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-svc\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.571545 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-nb\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.572559 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-swift-storage-0\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.573668 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-sb\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.600869 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwpk2\" (UniqueName: \"kubernetes.io/projected/c59068f7-7f7a-4de3-b312-9f40bfd46128-kube-api-access-lwpk2\") pod \"dnsmasq-dns-8cdb477f-d95t9\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.630262 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.666078 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.669306 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dddwh\" (UniqueName: \"kubernetes.io/projected/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-kube-api-access-dddwh\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.670306 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data-custom\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.670428 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.670597 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.670733 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.670842 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-scripts\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.671247 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-logs\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.672187 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-logs\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.671069 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.679343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " 
pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.684130 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-scripts\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.701525 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.713928 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dddwh\" (UniqueName: \"kubernetes.io/projected/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-kube-api-access-dddwh\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.729000 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data-custom\") pod \"cinder-api-0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " pod="openstack/cinder-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.912801 4926 generic.go:334] "Generic (PLEG): container finished" podID="817eea09-537e-42fa-9b5d-c012a71342be" containerID="fb039c7ded5fd5b219eb7a400d1eb1df757ec2eefd0efbae3ad3ddc65165fca6" exitCode=0 Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.913261 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" event={"ID":"817eea09-537e-42fa-9b5d-c012a71342be","Type":"ContainerDied","Data":"fb039c7ded5fd5b219eb7a400d1eb1df757ec2eefd0efbae3ad3ddc65165fca6"} Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.925202 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 25 18:32:55 crc kubenswrapper[4926]: I1125 18:32:55.988689 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.145769 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.192735 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-config\") pod \"817eea09-537e-42fa-9b5d-c012a71342be\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.192849 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-nb\") pod \"817eea09-537e-42fa-9b5d-c012a71342be\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.192900 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-sb\") pod \"817eea09-537e-42fa-9b5d-c012a71342be\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.192918 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-svc\") pod \"817eea09-537e-42fa-9b5d-c012a71342be\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.193037 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stmht\" (UniqueName: \"kubernetes.io/projected/817eea09-537e-42fa-9b5d-c012a71342be-kube-api-access-stmht\") pod \"817eea09-537e-42fa-9b5d-c012a71342be\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.193145 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-swift-storage-0\") pod \"817eea09-537e-42fa-9b5d-c012a71342be\" (UID: \"817eea09-537e-42fa-9b5d-c012a71342be\") " Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.207170 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/817eea09-537e-42fa-9b5d-c012a71342be-kube-api-access-stmht" (OuterVolumeSpecName: "kube-api-access-stmht") pod "817eea09-537e-42fa-9b5d-c012a71342be" (UID: "817eea09-537e-42fa-9b5d-c012a71342be"). InnerVolumeSpecName "kube-api-access-stmht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.298046 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stmht\" (UniqueName: \"kubernetes.io/projected/817eea09-537e-42fa-9b5d-c012a71342be-kube-api-access-stmht\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.344446 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "817eea09-537e-42fa-9b5d-c012a71342be" (UID: "817eea09-537e-42fa-9b5d-c012a71342be"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.362407 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "817eea09-537e-42fa-9b5d-c012a71342be" (UID: "817eea09-537e-42fa-9b5d-c012a71342be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.362901 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "817eea09-537e-42fa-9b5d-c012a71342be" (UID: "817eea09-537e-42fa-9b5d-c012a71342be"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.369937 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "817eea09-537e-42fa-9b5d-c012a71342be" (UID: "817eea09-537e-42fa-9b5d-c012a71342be"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.377862 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-config" (OuterVolumeSpecName: "config") pod "817eea09-537e-42fa-9b5d-c012a71342be" (UID: "817eea09-537e-42fa-9b5d-c012a71342be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.399914 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.400157 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.400223 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.400281 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.400337 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/817eea09-537e-42fa-9b5d-c012a71342be-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.601622 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.645107 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8cdb477f-d95t9"] Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.854824 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-api-0"] Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.945058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerStarted","Data":"eb6bf2935b1bdf4739f66fadc3dfa9aabc995cc5b1e8cddab8d31d731a94e18e"} Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.946010 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.952455 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4","Type":"ContainerStarted","Data":"be589561c8cad080fe46ed81c815148bd0b27842a60ba27d941117fb8d88ba59"} Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.954010 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" event={"ID":"c59068f7-7f7a-4de3-b312-9f40bfd46128","Type":"ContainerStarted","Data":"32e08a98c700a1afc37077a24ed302528eea0addd0f7483ebd8db70d9307c7b0"} Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.959153 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.959243 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fdfcd589c-z2h42" event={"ID":"817eea09-537e-42fa-9b5d-c012a71342be","Type":"ContainerDied","Data":"5a9d2ea2ae920c6d25f3df0ba3f8455a04524c8bb62a9b84b80b7ba34ceba137"} Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.960615 4926 scope.go:117] "RemoveContainer" containerID="fb039c7ded5fd5b219eb7a400d1eb1df757ec2eefd0efbae3ad3ddc65165fca6" Nov 25 18:32:56 crc kubenswrapper[4926]: I1125 18:32:56.972597 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.179891846 podStartE2EDuration="6.972574782s" podCreationTimestamp="2025-11-25 18:32:50 +0000 UTC" firstStartedPulling="2025-11-25 18:32:51.743822012 +0000 UTC m=+1202.129335617" lastFinishedPulling="2025-11-25 18:32:55.536504948 +0000 UTC m=+1205.922018553" observedRunningTime="2025-11-25 18:32:56.96463293 +0000 UTC m=+1207.350146535" watchObservedRunningTime="2025-11-25 18:32:56.972574782 +0000 UTC m=+1207.358088387" Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.026458 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6fdfcd589c-z2h42"] Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.032065 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6fdfcd589c-z2h42"] Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.240159 4926 scope.go:117] "RemoveContainer" containerID="5d45d1b59cf12a284abe1496b5678e99989dbdcff60abb9fe3545033a6cbf414" Nov 25 18:32:57 crc kubenswrapper[4926]: W1125 18:32:57.256195 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb22e6f5c_1ec7_4049_9453_502fae4b1ca0.slice/crio-d04a10cf3e32407799a55b2a641d1c0ece5ad41111326925401de83f81287330 WatchSource:0}: Error finding container d04a10cf3e32407799a55b2a641d1c0ece5ad41111326925401de83f81287330: Status 404 returned error can't find the container with id d04a10cf3e32407799a55b2a641d1c0ece5ad41111326925401de83f81287330 Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.980571 4926 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/cinder-api-0"] Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.989963 4926 generic.go:334] "Generic (PLEG): container finished" podID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerID="2eefbb6fff89446d845401b685dabf9ac0ed331636e88358f5125f9408e38ff9" exitCode=0 Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.990273 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" event={"ID":"c59068f7-7f7a-4de3-b312-9f40bfd46128","Type":"ContainerDied","Data":"2eefbb6fff89446d845401b685dabf9ac0ed331636e88358f5125f9408e38ff9"} Nov 25 18:32:57 crc kubenswrapper[4926]: I1125 18:32:57.999570 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22e6f5c-1ec7-4049-9453-502fae4b1ca0","Type":"ContainerStarted","Data":"d04a10cf3e32407799a55b2a641d1c0ece5ad41111326925401de83f81287330"} Nov 25 18:32:58 crc kubenswrapper[4926]: I1125 18:32:58.352328 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="817eea09-537e-42fa-9b5d-c012a71342be" path="/var/lib/kubelet/pods/817eea09-537e-42fa-9b5d-c012a71342be/volumes" Nov 25 18:32:58 crc kubenswrapper[4926]: I1125 18:32:58.797267 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:59 crc kubenswrapper[4926]: I1125 18:32:59.019718 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-547c7d8d4d-wq9d8" Nov 25 18:32:59 crc kubenswrapper[4926]: I1125 18:32:59.060789 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4","Type":"ContainerStarted","Data":"ec688fb6ad6f0873a99c56b77c5456c4054c5619d822d1ecb1d54779ae23c2a4"} Nov 25 18:32:59 crc kubenswrapper[4926]: I1125 18:32:59.067228 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" event={"ID":"c59068f7-7f7a-4de3-b312-9f40bfd46128","Type":"ContainerStarted","Data":"e09585bb6fb7be6abc4d3de84cfdf5a04056492375e8e4257b0e9896a134e7b8"} Nov 25 18:32:59 crc kubenswrapper[4926]: I1125 18:32:59.068641 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:32:59 crc kubenswrapper[4926]: I1125 18:32:59.086658 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22e6f5c-1ec7-4049-9453-502fae4b1ca0","Type":"ContainerStarted","Data":"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559"} Nov 25 18:32:59 crc kubenswrapper[4926]: I1125 18:32:59.104949 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" podStartSLOduration=4.104936086 podStartE2EDuration="4.104936086s" podCreationTimestamp="2025-11-25 18:32:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:32:59.095755283 +0000 UTC m=+1209.481268888" watchObservedRunningTime="2025-11-25 18:32:59.104936086 +0000 UTC m=+1209.490449691" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.099329 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4","Type":"ContainerStarted","Data":"0bb8be6193c0e28f102e9b0a317a069c359a76e7a02fde71348581142eb687ea"} Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.110587 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22e6f5c-1ec7-4049-9453-502fae4b1ca0","Type":"ContainerStarted","Data":"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369"} Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.110649 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.110671 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api-log" containerID="cri-o://39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559" gracePeriod=30 Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.110713 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api" containerID="cri-o://085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369" gracePeriod=30 Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.124398 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.934850555 podStartE2EDuration="5.124380255s" podCreationTimestamp="2025-11-25 18:32:55 +0000 UTC" firstStartedPulling="2025-11-25 18:32:56.606446661 +0000 UTC m=+1206.991960266" lastFinishedPulling="2025-11-25 18:32:57.795976361 +0000 UTC m=+1208.181489966" observedRunningTime="2025-11-25 18:33:00.118887186 +0000 UTC m=+1210.504400791" watchObservedRunningTime="2025-11-25 18:33:00.124380255 +0000 UTC m=+1210.509893850" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.150687 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.150653623 podStartE2EDuration="5.150653623s" podCreationTimestamp="2025-11-25 18:32:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:00.148083108 +0000 UTC m=+1210.533596713" watchObservedRunningTime="2025-11-25 18:33:00.150653623 +0000 UTC m=+1210.536167228" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.315811 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.501239 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f9c5dcd5b-mpcsb" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.562289 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-85c985f676-t2gwp"] Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.562524 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-85c985f676-t2gwp" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api-log" containerID="cri-o://ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157" gracePeriod=30 Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.562913 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-85c985f676-t2gwp" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api" containerID="cri-o://6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59" gracePeriod=30 Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 
18:33:00.631504 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 18:33:00 crc kubenswrapper[4926]: I1125 18:33:00.771275 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7dc866d94f-flgn2" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.043294 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.127886 4926 generic.go:334] "Generic (PLEG): container finished" podID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerID="085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369" exitCode=0 Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.127924 4926 generic.go:334] "Generic (PLEG): container finished" podID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerID="39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559" exitCode=143 Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.127972 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22e6f5c-1ec7-4049-9453-502fae4b1ca0","Type":"ContainerDied","Data":"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369"} Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.128001 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22e6f5c-1ec7-4049-9453-502fae4b1ca0","Type":"ContainerDied","Data":"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559"} Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.128011 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b22e6f5c-1ec7-4049-9453-502fae4b1ca0","Type":"ContainerDied","Data":"d04a10cf3e32407799a55b2a641d1c0ece5ad41111326925401de83f81287330"} Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.128026 4926 scope.go:117] "RemoveContainer" containerID="085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.128179 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.133777 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dddwh\" (UniqueName: \"kubernetes.io/projected/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-kube-api-access-dddwh\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.133929 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.134062 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-etc-machine-id\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.134142 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-logs\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.134276 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data-custom\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.134409 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-scripts\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.134477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-combined-ca-bundle\") pod \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\" (UID: \"b22e6f5c-1ec7-4049-9453-502fae4b1ca0\") " Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.136653 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-logs" (OuterVolumeSpecName: "logs") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.136718 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.140602 4926 generic.go:334] "Generic (PLEG): container finished" podID="f6593e75-121b-466f-ae57-9e947296042d" containerID="ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157" exitCode=143 Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.141701 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c985f676-t2gwp" event={"ID":"f6593e75-121b-466f-ae57-9e947296042d","Type":"ContainerDied","Data":"ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157"} Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.151438 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-kube-api-access-dddwh" (OuterVolumeSpecName: "kube-api-access-dddwh") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "kube-api-access-dddwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.163712 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.173476 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-scripts" (OuterVolumeSpecName: "scripts") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.181240 4926 scope.go:117] "RemoveContainer" containerID="39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.240075 4926 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.240100 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.240128 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dddwh\" (UniqueName: \"kubernetes.io/projected/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-kube-api-access-dddwh\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.240140 4926 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.240151 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.278525 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data" (OuterVolumeSpecName: "config-data") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.286978 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b22e6f5c-1ec7-4049-9453-502fae4b1ca0" (UID: "b22e6f5c-1ec7-4049-9453-502fae4b1ca0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.295027 4926 scope.go:117] "RemoveContainer" containerID="085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369" Nov 25 18:33:01 crc kubenswrapper[4926]: E1125 18:33:01.297718 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369\": container with ID starting with 085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369 not found: ID does not exist" containerID="085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.297761 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369"} err="failed to get container status \"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369\": rpc error: code = NotFound desc = could not find container \"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369\": container with ID starting with 085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369 not found: ID does not exist" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.297786 4926 scope.go:117] "RemoveContainer" containerID="39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559" Nov 25 18:33:01 crc kubenswrapper[4926]: E1125 18:33:01.298680 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559\": container with ID starting with 39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559 not found: ID does not exist" containerID="39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.298714 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559"} err="failed to get container status \"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559\": rpc error: code = NotFound desc = could not find container \"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559\": container with ID starting with 39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559 not found: ID does not exist" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.298730 4926 scope.go:117] "RemoveContainer" containerID="085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.301691 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369"} err="failed to get container status \"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369\": rpc error: code = NotFound desc = could not find container \"085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369\": container with ID starting with 085603b751b00300e4816de97738a5222ff251d15a6dde072d85707ed1114369 not found: ID does not exist" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.301717 4926 scope.go:117] "RemoveContainer" containerID="39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.329831 4926 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559"} err="failed to get container status \"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559\": rpc error: code = NotFound desc = could not find container \"39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559\": container with ID starting with 39088edbbde08ce4556f85f2e747a87694549f69fa12244461815b1c43a6c559 not found: ID does not exist" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.341658 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.341681 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b22e6f5c-1ec7-4049-9453-502fae4b1ca0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.481338 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.488979 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.516879 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:33:01 crc kubenswrapper[4926]: E1125 18:33:01.519927 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817eea09-537e-42fa-9b5d-c012a71342be" containerName="dnsmasq-dns" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.519947 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="817eea09-537e-42fa-9b5d-c012a71342be" containerName="dnsmasq-dns" Nov 25 18:33:01 crc kubenswrapper[4926]: E1125 18:33:01.519967 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817eea09-537e-42fa-9b5d-c012a71342be" containerName="init" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.519974 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="817eea09-537e-42fa-9b5d-c012a71342be" containerName="init" Nov 25 18:33:01 crc kubenswrapper[4926]: E1125 18:33:01.519995 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api-log" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.520002 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api-log" Nov 25 18:33:01 crc kubenswrapper[4926]: E1125 18:33:01.520020 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.520028 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.520200 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.520226 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" containerName="cinder-api-log" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.520240 4926 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="817eea09-537e-42fa-9b5d-c012a71342be" containerName="dnsmasq-dns" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.521300 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.532182 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.540438 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.540588 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.540689 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.647690 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.647800 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cttn\" (UniqueName: \"kubernetes.io/projected/ad60f4ee-1959-48b8-9ac6-ba95313bd024-kube-api-access-8cttn\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.647874 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-scripts\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.647959 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad60f4ee-1959-48b8-9ac6-ba95313bd024-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.648036 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-config-data\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.648177 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-config-data-custom\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.648266 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 
18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.648326 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.648710 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad60f4ee-1959-48b8-9ac6-ba95313bd024-logs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751552 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-config-data-custom\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751613 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751635 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751692 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad60f4ee-1959-48b8-9ac6-ba95313bd024-logs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751730 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cttn\" (UniqueName: \"kubernetes.io/projected/ad60f4ee-1959-48b8-9ac6-ba95313bd024-kube-api-access-8cttn\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751771 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-scripts\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751801 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad60f4ee-1959-48b8-9ac6-ba95313bd024-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " 
pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.751830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-config-data\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.752208 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad60f4ee-1959-48b8-9ac6-ba95313bd024-logs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.752271 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad60f4ee-1959-48b8-9ac6-ba95313bd024-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.755986 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-config-data-custom\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.756311 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.759223 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-config-data\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.759689 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-scripts\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.760900 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.771113 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad60f4ee-1959-48b8-9ac6-ba95313bd024-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 18:33:01.777453 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cttn\" (UniqueName: \"kubernetes.io/projected/ad60f4ee-1959-48b8-9ac6-ba95313bd024-kube-api-access-8cttn\") pod \"cinder-api-0\" (UID: \"ad60f4ee-1959-48b8-9ac6-ba95313bd024\") " pod="openstack/cinder-api-0" Nov 25 18:33:01 crc kubenswrapper[4926]: I1125 
18:33:01.852936 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.223990 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.225640 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.228991 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.229035 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-7mfxv" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.229203 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.234397 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.348160 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b22e6f5c-1ec7-4049-9453-502fae4b1ca0" path="/var/lib/kubelet/pods/b22e6f5c-1ec7-4049-9453-502fae4b1ca0/volumes" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.349337 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.362363 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/436e3ae7-0a2a-4cba-9416-804f6cba9b26-openstack-config-secret\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.362425 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436e3ae7-0a2a-4cba-9416-804f6cba9b26-combined-ca-bundle\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.362522 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/436e3ae7-0a2a-4cba-9416-804f6cba9b26-openstack-config\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.362576 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x48d4\" (UniqueName: \"kubernetes.io/projected/436e3ae7-0a2a-4cba-9416-804f6cba9b26-kube-api-access-x48d4\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.464573 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/436e3ae7-0a2a-4cba-9416-804f6cba9b26-openstack-config\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.464655 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-x48d4\" (UniqueName: \"kubernetes.io/projected/436e3ae7-0a2a-4cba-9416-804f6cba9b26-kube-api-access-x48d4\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.464695 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/436e3ae7-0a2a-4cba-9416-804f6cba9b26-openstack-config-secret\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.464714 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436e3ae7-0a2a-4cba-9416-804f6cba9b26-combined-ca-bundle\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.466672 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/436e3ae7-0a2a-4cba-9416-804f6cba9b26-openstack-config\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.470487 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436e3ae7-0a2a-4cba-9416-804f6cba9b26-combined-ca-bundle\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.474990 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/436e3ae7-0a2a-4cba-9416-804f6cba9b26-openstack-config-secret\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.497764 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x48d4\" (UniqueName: \"kubernetes.io/projected/436e3ae7-0a2a-4cba-9416-804f6cba9b26-kube-api-access-x48d4\") pod \"openstackclient\" (UID: \"436e3ae7-0a2a-4cba-9416-804f6cba9b26\") " pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.591403 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.665836 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.772089 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-combined-ca-bundle\") pod \"f6593e75-121b-466f-ae57-9e947296042d\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.772590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhvpf\" (UniqueName: \"kubernetes.io/projected/f6593e75-121b-466f-ae57-9e947296042d-kube-api-access-lhvpf\") pod \"f6593e75-121b-466f-ae57-9e947296042d\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.772671 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6593e75-121b-466f-ae57-9e947296042d-logs\") pod \"f6593e75-121b-466f-ae57-9e947296042d\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.772784 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data\") pod \"f6593e75-121b-466f-ae57-9e947296042d\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.772820 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data-custom\") pod \"f6593e75-121b-466f-ae57-9e947296042d\" (UID: \"f6593e75-121b-466f-ae57-9e947296042d\") " Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.773510 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6593e75-121b-466f-ae57-9e947296042d-logs" (OuterVolumeSpecName: "logs") pod "f6593e75-121b-466f-ae57-9e947296042d" (UID: "f6593e75-121b-466f-ae57-9e947296042d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.776913 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6593e75-121b-466f-ae57-9e947296042d-kube-api-access-lhvpf" (OuterVolumeSpecName: "kube-api-access-lhvpf") pod "f6593e75-121b-466f-ae57-9e947296042d" (UID: "f6593e75-121b-466f-ae57-9e947296042d"). InnerVolumeSpecName "kube-api-access-lhvpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.783623 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f6593e75-121b-466f-ae57-9e947296042d" (UID: "f6593e75-121b-466f-ae57-9e947296042d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.807547 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6593e75-121b-466f-ae57-9e947296042d" (UID: "f6593e75-121b-466f-ae57-9e947296042d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.850109 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data" (OuterVolumeSpecName: "config-data") pod "f6593e75-121b-466f-ae57-9e947296042d" (UID: "f6593e75-121b-466f-ae57-9e947296042d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.875418 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.875450 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhvpf\" (UniqueName: \"kubernetes.io/projected/f6593e75-121b-466f-ae57-9e947296042d-kube-api-access-lhvpf\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.875463 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6593e75-121b-466f-ae57-9e947296042d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.875476 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:02 crc kubenswrapper[4926]: I1125 18:33:02.875485 4926 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6593e75-121b-466f-ae57-9e947296042d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.155721 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.180740 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.186577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ad60f4ee-1959-48b8-9ac6-ba95313bd024","Type":"ContainerStarted","Data":"18d67de72784abc532dc111051ec1dcbcf99e5ac8aa3068c7d5bc21b7707e782"} Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.189836 4926 generic.go:334] "Generic (PLEG): container finished" podID="f6593e75-121b-466f-ae57-9e947296042d" containerID="6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59" exitCode=0 Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.189880 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c985f676-t2gwp" event={"ID":"f6593e75-121b-466f-ae57-9e947296042d","Type":"ContainerDied","Data":"6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59"} Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.189905 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c985f676-t2gwp" event={"ID":"f6593e75-121b-466f-ae57-9e947296042d","Type":"ContainerDied","Data":"77977abe8ca73b8e46d099784e263643fa4907bf86948d5ae68447272805073f"} Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.189928 4926 scope.go:117] "RemoveContainer" containerID="6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.189996 
4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-85c985f676-t2gwp" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.250327 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-85c985f676-t2gwp"] Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.251840 4926 scope.go:117] "RemoveContainer" containerID="ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.261004 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-85c985f676-t2gwp"] Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.289920 4926 scope.go:117] "RemoveContainer" containerID="6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59" Nov 25 18:33:03 crc kubenswrapper[4926]: E1125 18:33:03.290608 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59\": container with ID starting with 6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59 not found: ID does not exist" containerID="6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.290658 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59"} err="failed to get container status \"6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59\": rpc error: code = NotFound desc = could not find container \"6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59\": container with ID starting with 6a71b9ea097b773f98320747e42651f99add44d9c4d8e40c24db5768dff41b59 not found: ID does not exist" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.290694 4926 scope.go:117] "RemoveContainer" containerID="ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157" Nov 25 18:33:03 crc kubenswrapper[4926]: E1125 18:33:03.291341 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157\": container with ID starting with ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157 not found: ID does not exist" containerID="ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157" Nov 25 18:33:03 crc kubenswrapper[4926]: I1125 18:33:03.291447 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157"} err="failed to get container status \"ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157\": rpc error: code = NotFound desc = could not find container \"ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157\": container with ID starting with ee3fd2cac0d4a6eaf2f34d0d75d209ff59dcc1bc98416dfbf4c406bcb9ef7157 not found: ID does not exist" Nov 25 18:33:04 crc kubenswrapper[4926]: I1125 18:33:04.210822 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ad60f4ee-1959-48b8-9ac6-ba95313bd024","Type":"ContainerStarted","Data":"c0cf2d51f2b15800706fe490b719a5b9baf55ef53fa4cf6a7efda7acf3acd336"} Nov 25 18:33:04 crc kubenswrapper[4926]: I1125 18:33:04.211175 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-api-0" event={"ID":"ad60f4ee-1959-48b8-9ac6-ba95313bd024","Type":"ContainerStarted","Data":"7de35aa8ad5df29bb7c044e0734ba52d81f6fb46887ca4963027e2f20278fe0d"} Nov 25 18:33:04 crc kubenswrapper[4926]: I1125 18:33:04.212336 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 18:33:04 crc kubenswrapper[4926]: I1125 18:33:04.227295 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"436e3ae7-0a2a-4cba-9416-804f6cba9b26","Type":"ContainerStarted","Data":"0d29513a91680cc8351d564171399510095e7a2933412ee3fea3054d9e2616ce"} Nov 25 18:33:04 crc kubenswrapper[4926]: I1125 18:33:04.232971 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.232956236 podStartE2EDuration="3.232956236s" podCreationTimestamp="2025-11-25 18:33:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:04.229915519 +0000 UTC m=+1214.615429124" watchObservedRunningTime="2025-11-25 18:33:04.232956236 +0000 UTC m=+1214.618469841" Nov 25 18:33:04 crc kubenswrapper[4926]: I1125 18:33:04.344108 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6593e75-121b-466f-ae57-9e947296042d" path="/var/lib/kubelet/pods/f6593e75-121b-466f-ae57-9e947296042d/volumes" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.317710 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.161:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.161:8443: connect: connection refused" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.318095 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.329485 4926 scope.go:117] "RemoveContainer" containerID="18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.635223 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.646960 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.668679 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.757768 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c57575f65-q94t4"] Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.758327 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="dnsmasq-dns" containerID="cri-o://5863d8d9506af073cfb36662e06645b349ffc1efac9f25630d7aba970161a592" gracePeriod=10 Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.846969 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.904005 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/cinder-scheduler-0"] Nov 25 18:33:05 crc kubenswrapper[4926]: I1125 18:33:05.934659 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.165:5353: connect: connection refused" Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.270755 4926 generic.go:334] "Generic (PLEG): container finished" podID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerID="5863d8d9506af073cfb36662e06645b349ffc1efac9f25630d7aba970161a592" exitCode=0 Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.270808 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" event={"ID":"673cc698-685c-4cfa-b777-3ea0b418ec78","Type":"ContainerDied","Data":"5863d8d9506af073cfb36662e06645b349ffc1efac9f25630d7aba970161a592"} Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.281112 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerStarted","Data":"56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187"} Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.283470 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="cinder-scheduler" containerID="cri-o://ec688fb6ad6f0873a99c56b77c5456c4054c5619d822d1ecb1d54779ae23c2a4" gracePeriod=30 Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.284004 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="probe" containerID="cri-o://0bb8be6193c0e28f102e9b0a317a069c359a76e7a02fde71348581142eb687ea" gracePeriod=30 Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.836274 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.987922 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-svc\") pod \"673cc698-685c-4cfa-b777-3ea0b418ec78\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.988078 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-config\") pod \"673cc698-685c-4cfa-b777-3ea0b418ec78\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.988308 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-swift-storage-0\") pod \"673cc698-685c-4cfa-b777-3ea0b418ec78\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.988542 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b794l\" (UniqueName: \"kubernetes.io/projected/673cc698-685c-4cfa-b777-3ea0b418ec78-kube-api-access-b794l\") pod \"673cc698-685c-4cfa-b777-3ea0b418ec78\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.988613 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-sb\") pod \"673cc698-685c-4cfa-b777-3ea0b418ec78\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.988687 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-nb\") pod \"673cc698-685c-4cfa-b777-3ea0b418ec78\" (UID: \"673cc698-685c-4cfa-b777-3ea0b418ec78\") " Nov 25 18:33:06 crc kubenswrapper[4926]: I1125 18:33:06.996075 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/673cc698-685c-4cfa-b777-3ea0b418ec78-kube-api-access-b794l" (OuterVolumeSpecName: "kube-api-access-b794l") pod "673cc698-685c-4cfa-b777-3ea0b418ec78" (UID: "673cc698-685c-4cfa-b777-3ea0b418ec78"). InnerVolumeSpecName "kube-api-access-b794l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.049721 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "673cc698-685c-4cfa-b777-3ea0b418ec78" (UID: "673cc698-685c-4cfa-b777-3ea0b418ec78"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.058631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "673cc698-685c-4cfa-b777-3ea0b418ec78" (UID: "673cc698-685c-4cfa-b777-3ea0b418ec78"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.077156 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-config" (OuterVolumeSpecName: "config") pod "673cc698-685c-4cfa-b777-3ea0b418ec78" (UID: "673cc698-685c-4cfa-b777-3ea0b418ec78"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.084805 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "673cc698-685c-4cfa-b777-3ea0b418ec78" (UID: "673cc698-685c-4cfa-b777-3ea0b418ec78"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.091684 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.091721 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.091734 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.091742 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b794l\" (UniqueName: \"kubernetes.io/projected/673cc698-685c-4cfa-b777-3ea0b418ec78-kube-api-access-b794l\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.091751 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.098716 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "673cc698-685c-4cfa-b777-3ea0b418ec78" (UID: "673cc698-685c-4cfa-b777-3ea0b418ec78"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.193417 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673cc698-685c-4cfa-b777-3ea0b418ec78-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.292427 4926 generic.go:334] "Generic (PLEG): container finished" podID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerID="0bb8be6193c0e28f102e9b0a317a069c359a76e7a02fde71348581142eb687ea" exitCode=0 Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.292490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4","Type":"ContainerDied","Data":"0bb8be6193c0e28f102e9b0a317a069c359a76e7a02fde71348581142eb687ea"} Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.297705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" event={"ID":"673cc698-685c-4cfa-b777-3ea0b418ec78","Type":"ContainerDied","Data":"60a5d6d12ff36af0bffdbbe5322c8602d0a81955990ca5420f7242dca9c27ed0"} Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.297745 4926 scope.go:117] "RemoveContainer" containerID="5863d8d9506af073cfb36662e06645b349ffc1efac9f25630d7aba970161a592" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.297868 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c57575f65-q94t4" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.338723 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c57575f65-q94t4"] Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.340842 4926 scope.go:117] "RemoveContainer" containerID="bbab1e4f1fc9f6ce89ff733d282ac02e76078f7ef3cf555187ccd93a43022f92" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.354406 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c57575f65-q94t4"] Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618188 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5db9fd79b9-99khc"] Nov 25 18:33:07 crc kubenswrapper[4926]: E1125 18:33:07.618578 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="dnsmasq-dns" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618595 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="dnsmasq-dns" Nov 25 18:33:07 crc kubenswrapper[4926]: E1125 18:33:07.618615 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618622 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api" Nov 25 18:33:07 crc kubenswrapper[4926]: E1125 18:33:07.618635 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="init" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618641 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="init" Nov 25 18:33:07 crc kubenswrapper[4926]: E1125 18:33:07.618651 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6593e75-121b-466f-ae57-9e947296042d" 
containerName="barbican-api-log" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618657 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api-log" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618849 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618864 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" containerName="dnsmasq-dns" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.618876 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6593e75-121b-466f-ae57-9e947296042d" containerName="barbican-api-log" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.620590 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.624354 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.624632 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.625105 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.637603 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5db9fd79b9-99khc"] Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.708907 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3842d4ae-3cdd-48ad-a374-dbf807481f6f-etc-swift\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.708976 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-internal-tls-certs\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.709037 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-combined-ca-bundle\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.709073 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3842d4ae-3cdd-48ad-a374-dbf807481f6f-log-httpd\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.709108 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/3842d4ae-3cdd-48ad-a374-dbf807481f6f-run-httpd\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.709444 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-public-tls-certs\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.709513 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q68c4\" (UniqueName: \"kubernetes.io/projected/3842d4ae-3cdd-48ad-a374-dbf807481f6f-kube-api-access-q68c4\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.709692 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-config-data\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.811185 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-public-tls-certs\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.811239 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q68c4\" (UniqueName: \"kubernetes.io/projected/3842d4ae-3cdd-48ad-a374-dbf807481f6f-kube-api-access-q68c4\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.811300 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-config-data\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.811344 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3842d4ae-3cdd-48ad-a374-dbf807481f6f-etc-swift\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.811385 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-internal-tls-certs\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.811545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-combined-ca-bundle\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.812145 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3842d4ae-3cdd-48ad-a374-dbf807481f6f-log-httpd\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.812179 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3842d4ae-3cdd-48ad-a374-dbf807481f6f-run-httpd\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.812691 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3842d4ae-3cdd-48ad-a374-dbf807481f6f-run-httpd\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.812747 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3842d4ae-3cdd-48ad-a374-dbf807481f6f-log-httpd\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.821494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-internal-tls-certs\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.821494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-combined-ca-bundle\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.821502 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-public-tls-certs\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.825581 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3842d4ae-3cdd-48ad-a374-dbf807481f6f-config-data\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.828948 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q68c4\" (UniqueName: \"kubernetes.io/projected/3842d4ae-3cdd-48ad-a374-dbf807481f6f-kube-api-access-q68c4\") pod \"swift-proxy-5db9fd79b9-99khc\" 
(UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.829067 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3842d4ae-3cdd-48ad-a374-dbf807481f6f-etc-swift\") pod \"swift-proxy-5db9fd79b9-99khc\" (UID: \"3842d4ae-3cdd-48ad-a374-dbf807481f6f\") " pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.899435 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.916807 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-87c6cbb57-5vxgs" Nov 25 18:33:07 crc kubenswrapper[4926]: I1125 18:33:07.945862 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.013871 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-bbf6864d-nswqp"] Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.021603 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-bbf6864d-nswqp" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-httpd" containerID="cri-o://745ee863d21e18e961f2f65c719dab8530a6229a44af738fc91abef8cf8241d7" gracePeriod=30 Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.021680 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-bbf6864d-nswqp" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-api" containerID="cri-o://2ad57f255b3a7c599d030c35520841e08c4cce4188725f9f8b75179bd1e32703" gracePeriod=30 Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.335891 4926 generic.go:334] "Generic (PLEG): container finished" podID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerID="745ee863d21e18e961f2f65c719dab8530a6229a44af738fc91abef8cf8241d7" exitCode=0 Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.353691 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="673cc698-685c-4cfa-b777-3ea0b418ec78" path="/var/lib/kubelet/pods/673cc698-685c-4cfa-b777-3ea0b418ec78/volumes" Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.354665 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bbf6864d-nswqp" event={"ID":"42900e2c-5c74-417f-a60c-6955d8c0fc29","Type":"ContainerDied","Data":"745ee863d21e18e961f2f65c719dab8530a6229a44af738fc91abef8cf8241d7"} Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.597319 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.598785 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-central-agent" containerID="cri-o://4d98227633c8f09c886dfd83d431321cc347e53dba2bfaead5bb3c5e38436249" gracePeriod=30 Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.599205 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-notification-agent" containerID="cri-o://e3bb368b566e83dfbfef46792d7aa559564869d6ef4474b4dd80f5cd916d46e4" gracePeriod=30 Nov 25 
18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.599228 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="sg-core" containerID="cri-o://325a126e5dc054dc07e98e78ed06dbe83cdff707ac49c258c75e5df380fc7918" gracePeriod=30 Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.599343 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="proxy-httpd" containerID="cri-o://eb6bf2935b1bdf4739f66fadc3dfa9aabc995cc5b1e8cddab8d31d731a94e18e" gracePeriod=30 Nov 25 18:33:08 crc kubenswrapper[4926]: I1125 18:33:08.611731 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.357361 4926 generic.go:334] "Generic (PLEG): container finished" podID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerID="ec688fb6ad6f0873a99c56b77c5456c4054c5619d822d1ecb1d54779ae23c2a4" exitCode=0 Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.357482 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4","Type":"ContainerDied","Data":"ec688fb6ad6f0873a99c56b77c5456c4054c5619d822d1ecb1d54779ae23c2a4"} Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.359748 4926 generic.go:334] "Generic (PLEG): container finished" podID="9f664100-2926-4e80-a06e-5c09021eb736" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" exitCode=1 Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.359807 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerDied","Data":"56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187"} Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.359841 4926 scope.go:117] "RemoveContainer" containerID="18049ed143129eba5b75add6cecb7eda1ea6f28de604f6de81c39421f6bf79a0" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.360652 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:09 crc kubenswrapper[4926]: E1125 18:33:09.360956 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(9f664100-2926-4e80-a06e-5c09021eb736)\"" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.374663 4926 generic.go:334] "Generic (PLEG): container finished" podID="2add9e5e-d863-4ecc-9778-c932b0532956" containerID="eb6bf2935b1bdf4739f66fadc3dfa9aabc995cc5b1e8cddab8d31d731a94e18e" exitCode=0 Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.374713 4926 generic.go:334] "Generic (PLEG): container finished" podID="2add9e5e-d863-4ecc-9778-c932b0532956" containerID="325a126e5dc054dc07e98e78ed06dbe83cdff707ac49c258c75e5df380fc7918" exitCode=2 Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.374722 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="2add9e5e-d863-4ecc-9778-c932b0532956" containerID="4d98227633c8f09c886dfd83d431321cc347e53dba2bfaead5bb3c5e38436249" exitCode=0 Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.374746 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerDied","Data":"eb6bf2935b1bdf4739f66fadc3dfa9aabc995cc5b1e8cddab8d31d731a94e18e"} Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.374774 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerDied","Data":"325a126e5dc054dc07e98e78ed06dbe83cdff707ac49c258c75e5df380fc7918"} Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.374784 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerDied","Data":"4d98227633c8f09c886dfd83d431321cc347e53dba2bfaead5bb3c5e38436249"} Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.476742 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-ldk4s"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.478007 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.496669 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ldk4s"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.586017 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-fm7m4"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.587271 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.602534 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-fm7m4"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.655104 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znzwx\" (UniqueName: \"kubernetes.io/projected/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-kube-api-access-znzwx\") pod \"nova-api-db-create-ldk4s\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.655180 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-operator-scripts\") pod \"nova-api-db-create-ldk4s\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.679885 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-a61c-account-create-update-ntxbc"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.681254 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.683616 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.688180 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a61c-account-create-update-ntxbc"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.759590 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fb216ae-643a-47e9-9dce-b3f13e633e95-operator-scripts\") pod \"nova-cell0-db-create-fm7m4\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.759694 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znzwx\" (UniqueName: \"kubernetes.io/projected/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-kube-api-access-znzwx\") pod \"nova-api-db-create-ldk4s\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.759773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-operator-scripts\") pod \"nova-api-db-create-ldk4s\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.759849 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mlw2\" (UniqueName: \"kubernetes.io/projected/8fb216ae-643a-47e9-9dce-b3f13e633e95-kube-api-access-7mlw2\") pod \"nova-cell0-db-create-fm7m4\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.761015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-operator-scripts\") pod \"nova-api-db-create-ldk4s\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.766153 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-2sw5v"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.767321 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.784475 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-2sw5v"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.790531 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znzwx\" (UniqueName: \"kubernetes.io/projected/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-kube-api-access-znzwx\") pod \"nova-api-db-create-ldk4s\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.833793 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.861742 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mlw2\" (UniqueName: \"kubernetes.io/projected/8fb216ae-643a-47e9-9dce-b3f13e633e95-kube-api-access-7mlw2\") pod \"nova-cell0-db-create-fm7m4\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.861806 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np58f\" (UniqueName: \"kubernetes.io/projected/68f7c172-f0a2-43f2-b4bc-5be36758ea34-kube-api-access-np58f\") pod \"nova-api-a61c-account-create-update-ntxbc\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.861878 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68f7c172-f0a2-43f2-b4bc-5be36758ea34-operator-scripts\") pod \"nova-api-a61c-account-create-update-ntxbc\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.861910 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2xck\" (UniqueName: \"kubernetes.io/projected/79206e63-62c7-45e0-93f3-f30a28d822bb-kube-api-access-h2xck\") pod \"nova-cell1-db-create-2sw5v\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.861934 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fb216ae-643a-47e9-9dce-b3f13e633e95-operator-scripts\") pod \"nova-cell0-db-create-fm7m4\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.861977 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79206e63-62c7-45e0-93f3-f30a28d822bb-operator-scripts\") pod \"nova-cell1-db-create-2sw5v\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.862962 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fb216ae-643a-47e9-9dce-b3f13e633e95-operator-scripts\") pod \"nova-cell0-db-create-fm7m4\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.870944 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ca6b-account-create-update-9rj6z"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.872160 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.874694 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.887086 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ca6b-account-create-update-9rj6z"] Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.891796 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mlw2\" (UniqueName: \"kubernetes.io/projected/8fb216ae-643a-47e9-9dce-b3f13e633e95-kube-api-access-7mlw2\") pod \"nova-cell0-db-create-fm7m4\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.907855 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.964254 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np58f\" (UniqueName: \"kubernetes.io/projected/68f7c172-f0a2-43f2-b4bc-5be36758ea34-kube-api-access-np58f\") pod \"nova-api-a61c-account-create-update-ntxbc\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.964346 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68f7c172-f0a2-43f2-b4bc-5be36758ea34-operator-scripts\") pod \"nova-api-a61c-account-create-update-ntxbc\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.964419 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2xck\" (UniqueName: \"kubernetes.io/projected/79206e63-62c7-45e0-93f3-f30a28d822bb-kube-api-access-h2xck\") pod \"nova-cell1-db-create-2sw5v\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.964474 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-operator-scripts\") pod \"nova-cell0-ca6b-account-create-update-9rj6z\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.964511 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v6qr\" (UniqueName: \"kubernetes.io/projected/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-kube-api-access-8v6qr\") pod \"nova-cell0-ca6b-account-create-update-9rj6z\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.964546 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79206e63-62c7-45e0-93f3-f30a28d822bb-operator-scripts\") pod \"nova-cell1-db-create-2sw5v\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:09 crc 
kubenswrapper[4926]: I1125 18:33:09.965395 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79206e63-62c7-45e0-93f3-f30a28d822bb-operator-scripts\") pod \"nova-cell1-db-create-2sw5v\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.966838 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68f7c172-f0a2-43f2-b4bc-5be36758ea34-operator-scripts\") pod \"nova-api-a61c-account-create-update-ntxbc\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.985056 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np58f\" (UniqueName: \"kubernetes.io/projected/68f7c172-f0a2-43f2-b4bc-5be36758ea34-kube-api-access-np58f\") pod \"nova-api-a61c-account-create-update-ntxbc\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:09 crc kubenswrapper[4926]: I1125 18:33:09.985268 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2xck\" (UniqueName: \"kubernetes.io/projected/79206e63-62c7-45e0-93f3-f30a28d822bb-kube-api-access-h2xck\") pod \"nova-cell1-db-create-2sw5v\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.006759 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.067337 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-operator-scripts\") pod \"nova-cell0-ca6b-account-create-update-9rj6z\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.067468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v6qr\" (UniqueName: \"kubernetes.io/projected/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-kube-api-access-8v6qr\") pod \"nova-cell0-ca6b-account-create-update-9rj6z\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.068402 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-operator-scripts\") pod \"nova-cell0-ca6b-account-create-update-9rj6z\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.079350 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cda2-account-create-update-njssm"] Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.081416 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.084124 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.084873 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.087156 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v6qr\" (UniqueName: \"kubernetes.io/projected/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-kube-api-access-8v6qr\") pod \"nova-cell0-ca6b-account-create-update-9rj6z\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.120697 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cda2-account-create-update-njssm"] Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.170505 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-operator-scripts\") pod \"nova-cell1-cda2-account-create-update-njssm\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.170669 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rl7g\" (UniqueName: \"kubernetes.io/projected/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-kube-api-access-7rl7g\") pod \"nova-cell1-cda2-account-create-update-njssm\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.193460 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.273442 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-operator-scripts\") pod \"nova-cell1-cda2-account-create-update-njssm\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.273504 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rl7g\" (UniqueName: \"kubernetes.io/projected/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-kube-api-access-7rl7g\") pod \"nova-cell1-cda2-account-create-update-njssm\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.274419 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-operator-scripts\") pod \"nova-cell1-cda2-account-create-update-njssm\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.290557 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rl7g\" (UniqueName: \"kubernetes.io/projected/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-kube-api-access-7rl7g\") pod \"nova-cell1-cda2-account-create-update-njssm\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.405882 4926 generic.go:334] "Generic (PLEG): container finished" podID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerID="1cf829482d4e11d816d3102c1a6f63cdade69e841f157c7d4541ecf4b0799142" exitCode=137 Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.406259 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-765875bb4b-tr7fm" event={"ID":"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55","Type":"ContainerDied","Data":"1cf829482d4e11d816d3102c1a6f63cdade69e841f157c7d4541ecf4b0799142"} Nov 25 18:33:10 crc kubenswrapper[4926]: I1125 18:33:10.454517 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:11 crc kubenswrapper[4926]: I1125 18:33:11.441512 4926 generic.go:334] "Generic (PLEG): container finished" podID="2add9e5e-d863-4ecc-9778-c932b0532956" containerID="e3bb368b566e83dfbfef46792d7aa559564869d6ef4474b4dd80f5cd916d46e4" exitCode=0 Nov 25 18:33:11 crc kubenswrapper[4926]: I1125 18:33:11.441583 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerDied","Data":"e3bb368b566e83dfbfef46792d7aa559564869d6ef4474b4dd80f5cd916d46e4"} Nov 25 18:33:13 crc kubenswrapper[4926]: I1125 18:33:13.980103 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 18:33:14 crc kubenswrapper[4926]: I1125 18:33:14.915064 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:14 crc kubenswrapper[4926]: I1125 18:33:14.915329 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:14 crc kubenswrapper[4926]: I1125 18:33:14.915977 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:14 crc kubenswrapper[4926]: E1125 18:33:14.916202 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(9f664100-2926-4e80-a06e-5c09021eb736)\"" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" Nov 25 18:33:15 crc kubenswrapper[4926]: I1125 18:33:15.319076 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-765875bb4b-tr7fm" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.161:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.161:8443: connect: connection refused" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.819732 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.835231 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.851812 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.936985 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-scripts\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937322 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-sg-core-conf-yaml\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937400 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2r2m9\" (UniqueName: \"kubernetes.io/projected/2add9e5e-d863-4ecc-9778-c932b0532956-kube-api-access-2r2m9\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937443 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrml4\" (UniqueName: \"kubernetes.io/projected/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-kube-api-access-mrml4\") pod \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937483 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-combined-ca-bundle\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937503 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-scripts\") pod \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937523 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data\") pod \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937547 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-combined-ca-bundle\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937577 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-config-data\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937633 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-log-httpd\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: 
\"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937668 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-secret-key\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937749 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-combined-ca-bundle\") pod \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937770 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data-custom\") pod \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937789 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k597c\" (UniqueName: \"kubernetes.io/projected/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-kube-api-access-k597c\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937819 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-scripts\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937863 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-etc-machine-id\") pod \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\" (UID: \"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937896 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-logs\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937913 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-tls-certs\") pod \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\" (UID: \"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937934 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-run-httpd\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: \"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.937949 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-config-data\") pod \"2add9e5e-d863-4ecc-9778-c932b0532956\" (UID: 
\"2add9e5e-d863-4ecc-9778-c932b0532956\") " Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.953751 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-scripts" (OuterVolumeSpecName: "scripts") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.958299 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" (UID: "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.958830 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.959493 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.960083 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-logs" (OuterVolumeSpecName: "logs") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:17 crc kubenswrapper[4926]: I1125 18:33:17.962974 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2add9e5e-d863-4ecc-9778-c932b0532956-kube-api-access-2r2m9" (OuterVolumeSpecName: "kube-api-access-2r2m9") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "kube-api-access-2r2m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:17.977631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-kube-api-access-mrml4" (OuterVolumeSpecName: "kube-api-access-mrml4") pod "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" (UID: "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4"). InnerVolumeSpecName "kube-api-access-mrml4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.001696 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.001857 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-scripts" (OuterVolumeSpecName: "scripts") pod "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" (UID: "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.004331 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-kube-api-access-k597c" (OuterVolumeSpecName: "kube-api-access-k597c") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "kube-api-access-k597c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.014733 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" (UID: "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.016512 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cda2-account-create-update-njssm"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040657 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040694 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040705 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040716 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2r2m9\" (UniqueName: \"kubernetes.io/projected/2add9e5e-d863-4ecc-9778-c932b0532956-kube-api-access-2r2m9\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040727 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrml4\" (UniqueName: \"kubernetes.io/projected/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-kube-api-access-mrml4\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040738 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040747 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2add9e5e-d863-4ecc-9778-c932b0532956-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040758 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040768 4926 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040779 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k597c\" (UniqueName: \"kubernetes.io/projected/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-kube-api-access-k597c\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.040789 4926 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.058994 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-config-data" (OuterVolumeSpecName: "config-data") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.076807 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.085012 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-scripts" (OuterVolumeSpecName: "scripts") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.098554 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.119663 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" (UID: "b5c499cb-2dc4-4d3d-9bcd-4933f8bace55"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.119761 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" (UID: "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.142883 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.142914 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.142923 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.142932 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.142942 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.142950 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.155183 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.186537 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data" (OuterVolumeSpecName: "config-data") pod "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" (UID: "d317a89a-d9e1-4a1c-8019-cf3cb7f517c4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.222973 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-config-data" (OuterVolumeSpecName: "config-data") pod "2add9e5e-d863-4ecc-9778-c932b0532956" (UID: "2add9e5e-d863-4ecc-9778-c932b0532956"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.248560 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.248610 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.248620 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2add9e5e-d863-4ecc-9778-c932b0532956-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: W1125 18:33:18.279315 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae1b2a5c_29d8_42a3_aedc_eb296da03c2d.slice/crio-9f42f1af9063e6d1c93f06029ee15d7d0876dff6a786c318bbce3b8b97042005 WatchSource:0}: Error finding container 9f42f1af9063e6d1c93f06029ee15d7d0876dff6a786c318bbce3b8b97042005: Status 404 returned error can't find the container with id 9f42f1af9063e6d1c93f06029ee15d7d0876dff6a786c318bbce3b8b97042005 Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.476977 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-fm7m4"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.556190 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2add9e5e-d863-4ecc-9778-c932b0532956","Type":"ContainerDied","Data":"7d649be1828ec9e5a8776f82b2dbc722bca5b2f00d5e4f4c83a501154fcb736b"} Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.556226 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.556261 4926 scope.go:117] "RemoveContainer" containerID="eb6bf2935b1bdf4739f66fadc3dfa9aabc995cc5b1e8cddab8d31d731a94e18e" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.562672 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cda2-account-create-update-njssm" event={"ID":"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d","Type":"ContainerStarted","Data":"9f42f1af9063e6d1c93f06029ee15d7d0876dff6a786c318bbce3b8b97042005"} Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.566222 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-765875bb4b-tr7fm" event={"ID":"b5c499cb-2dc4-4d3d-9bcd-4933f8bace55","Type":"ContainerDied","Data":"6d9953637e4afd041f4ba7cd4a9c0596713e3ad4ae424d5475ddf15b63765128"} Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.566395 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-765875bb4b-tr7fm" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.579980 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d317a89a-d9e1-4a1c-8019-cf3cb7f517c4","Type":"ContainerDied","Data":"be589561c8cad080fe46ed81c815148bd0b27842a60ba27d941117fb8d88ba59"} Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.580062 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.594010 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fm7m4" event={"ID":"8fb216ae-643a-47e9-9dce-b3f13e633e95","Type":"ContainerStarted","Data":"478b392f22efedfd9a006a8f70d5d74cbd460a7d9a524e08cd42cd1cd6989294"} Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.609690 4926 generic.go:334] "Generic (PLEG): container finished" podID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerID="2ad57f255b3a7c599d030c35520841e08c4cce4188725f9f8b75179bd1e32703" exitCode=0 Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.609783 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bbf6864d-nswqp" event={"ID":"42900e2c-5c74-417f-a60c-6955d8c0fc29","Type":"ContainerDied","Data":"2ad57f255b3a7c599d030c35520841e08c4cce4188725f9f8b75179bd1e32703"} Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.619563 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.040071526 podStartE2EDuration="16.619541568s" podCreationTimestamp="2025-11-25 18:33:02 +0000 UTC" firstStartedPulling="2025-11-25 18:33:03.180539479 +0000 UTC m=+1213.566053084" lastFinishedPulling="2025-11-25 18:33:17.760009521 +0000 UTC m=+1228.145523126" observedRunningTime="2025-11-25 18:33:18.609344859 +0000 UTC m=+1228.994858464" watchObservedRunningTime="2025-11-25 18:33:18.619541568 +0000 UTC m=+1229.005055173" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.697564 4926 scope.go:117] "RemoveContainer" containerID="325a126e5dc054dc07e98e78ed06dbe83cdff707ac49c258c75e5df380fc7918" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.789599 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.824167 4926 scope.go:117] "RemoveContainer" containerID="e3bb368b566e83dfbfef46792d7aa559564869d6ef4474b4dd80f5cd916d46e4" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.872732 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-765875bb4b-tr7fm"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.881475 4926 scope.go:117] "RemoveContainer" containerID="4d98227633c8f09c886dfd83d431321cc347e53dba2bfaead5bb3c5e38436249" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.884463 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-765875bb4b-tr7fm"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.884842 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-httpd-config\") pod \"42900e2c-5c74-417f-a60c-6955d8c0fc29\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.884875 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-ovndb-tls-certs\") pod \"42900e2c-5c74-417f-a60c-6955d8c0fc29\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.884941 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c92gw\" (UniqueName: \"kubernetes.io/projected/42900e2c-5c74-417f-a60c-6955d8c0fc29-kube-api-access-c92gw\") pod \"42900e2c-5c74-417f-a60c-6955d8c0fc29\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.889993 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-config\") pod \"42900e2c-5c74-417f-a60c-6955d8c0fc29\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.890037 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-combined-ca-bundle\") pod \"42900e2c-5c74-417f-a60c-6955d8c0fc29\" (UID: \"42900e2c-5c74-417f-a60c-6955d8c0fc29\") " Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.897976 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42900e2c-5c74-417f-a60c-6955d8c0fc29-kube-api-access-c92gw" (OuterVolumeSpecName: "kube-api-access-c92gw") pod "42900e2c-5c74-417f-a60c-6955d8c0fc29" (UID: "42900e2c-5c74-417f-a60c-6955d8c0fc29"). InnerVolumeSpecName "kube-api-access-c92gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.902463 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.915577 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "42900e2c-5c74-417f-a60c-6955d8c0fc29" (UID: "42900e2c-5c74-417f-a60c-6955d8c0fc29"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.931193 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.945587 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.957163 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.964596 4926 scope.go:117] "RemoveContainer" containerID="852a73a980fb903c7c9cee66c146d5af7a6a6f5e72cce623c7519f6abb47b4ff" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976282 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.976879 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-api" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976899 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-api" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.976912 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-notification-agent" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976918 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-notification-agent" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.976929 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-httpd" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976936 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-httpd" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.976950 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="sg-core" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976956 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="sg-core" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.976968 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon-log" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976975 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon-log" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.976984 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="proxy-httpd" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.976989 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="proxy-httpd" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.977002 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-central-agent" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977008 4926 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-central-agent" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.977030 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="cinder-scheduler" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977036 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="cinder-scheduler" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.977048 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="probe" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977054 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="probe" Nov 25 18:33:18 crc kubenswrapper[4926]: E1125 18:33:18.977068 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977073 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977235 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="proxy-httpd" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977250 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-api" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977260 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="sg-core" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977270 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="probe" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977282 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" containerName="cinder-scheduler" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977291 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" containerName="neutron-httpd" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977298 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977307 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-notification-agent" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977318 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" containerName="ceilometer-central-agent" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.977327 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" containerName="horizon-log" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.978965 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.982180 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.982509 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.995718 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996495 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-scripts\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996543 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996597 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-run-httpd\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996614 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlzsw\" (UniqueName: \"kubernetes.io/projected/332b4639-2578-4039-b9da-cbef6b683a2a-kube-api-access-vlzsw\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996637 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-config-data\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996665 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996684 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-log-httpd\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996726 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c92gw\" (UniqueName: \"kubernetes.io/projected/42900e2c-5c74-417f-a60c-6955d8c0fc29-kube-api-access-c92gw\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.996737 4926 reconciler_common.go:293] "Volume detached for volume 
\"httpd-config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:18 crc kubenswrapper[4926]: I1125 18:33:18.997969 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.000604 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.012702 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42900e2c-5c74-417f-a60c-6955d8c0fc29" (UID: "42900e2c-5c74-417f-a60c-6955d8c0fc29"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.023627 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-config" (OuterVolumeSpecName: "config") pod "42900e2c-5c74-417f-a60c-6955d8c0fc29" (UID: "42900e2c-5c74-417f-a60c-6955d8c0fc29"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.050079 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.065809 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "42900e2c-5c74-417f-a60c-6955d8c0fc29" (UID: "42900e2c-5c74-417f-a60c-6955d8c0fc29"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.083725 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.094736 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ldk4s"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099245 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-config-data\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099282 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099320 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099354 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-scripts\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099414 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099467 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwttn\" (UniqueName: \"kubernetes.io/projected/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-kube-api-access-dwttn\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099496 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-run-httpd\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099512 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlzsw\" (UniqueName: \"kubernetes.io/projected/332b4639-2578-4039-b9da-cbef6b683a2a-kube-api-access-vlzsw\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099527 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099546 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-config-data\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099574 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-scripts\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099593 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099610 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-log-httpd\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099661 4926 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099672 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.099681 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42900e2c-5c74-417f-a60c-6955d8c0fc29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.100074 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-log-httpd\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.101034 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-run-httpd\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.108748 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a61c-account-create-update-ntxbc"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.110172 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-config-data\") pod \"ceilometer-0\" (UID: 
\"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.110716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-scripts\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.111221 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.115927 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.121659 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ca6b-account-create-update-9rj6z"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.126065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlzsw\" (UniqueName: \"kubernetes.io/projected/332b4639-2578-4039-b9da-cbef6b683a2a-kube-api-access-vlzsw\") pod \"ceilometer-0\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.144558 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-2sw5v"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.159618 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5db9fd79b9-99khc"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.196286 4926 scope.go:117] "RemoveContainer" containerID="1cf829482d4e11d816d3102c1a6f63cdade69e841f157c7d4541ecf4b0799142" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.201235 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwttn\" (UniqueName: \"kubernetes.io/projected/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-kube-api-access-dwttn\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.201279 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.201316 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-scripts\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.201382 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-config-data\") pod \"cinder-scheduler-0\" (UID: 
\"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.201400 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.201433 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.202300 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.205979 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-config-data\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.206966 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-scripts\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.207584 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.209042 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.223821 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwttn\" (UniqueName: \"kubernetes.io/projected/ad6a0baa-57a5-47d8-81fc-4395a6f4079a-kube-api-access-dwttn\") pod \"cinder-scheduler-0\" (UID: \"ad6a0baa-57a5-47d8-81fc-4395a6f4079a\") " pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.317140 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.334875 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.516542 4926 scope.go:117] "RemoveContainer" containerID="0bb8be6193c0e28f102e9b0a317a069c359a76e7a02fde71348581142eb687ea" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.572662 4926 scope.go:117] "RemoveContainer" containerID="ec688fb6ad6f0873a99c56b77c5456c4054c5619d822d1ecb1d54779ae23c2a4" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.622204 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bbf6864d-nswqp" event={"ID":"42900e2c-5c74-417f-a60c-6955d8c0fc29","Type":"ContainerDied","Data":"196e76d80ec3b641f7a9a0a71395d9d9605b393fd2e8e49df37197f210372db6"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.622287 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-bbf6864d-nswqp" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.658930 4926 scope.go:117] "RemoveContainer" containerID="745ee863d21e18e961f2f65c719dab8530a6229a44af738fc91abef8cf8241d7" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.659079 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" event={"ID":"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0","Type":"ContainerStarted","Data":"0446a33cabee2a3eb6ccfed77112fb9548d216f4af3fc4ac6187f90acde6ade2"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.667324 4926 generic.go:334] "Generic (PLEG): container finished" podID="ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" containerID="bb1a43ff5f94f6f91bcdf3f12ffcf59bf4c7d3cfc277fbc8ff40cf19fd56eaa1" exitCode=0 Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.667438 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cda2-account-create-update-njssm" event={"ID":"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d","Type":"ContainerDied","Data":"bb1a43ff5f94f6f91bcdf3f12ffcf59bf4c7d3cfc277fbc8ff40cf19fd56eaa1"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.689115 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2sw5v" event={"ID":"79206e63-62c7-45e0-93f3-f30a28d822bb","Type":"ContainerStarted","Data":"2b8d3e9987a23338b615b6ea5e4344f8edb0b92dbf831f6ab96c26addfb426f4"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.691993 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ldk4s" event={"ID":"46c8d0a4-afda-4df7-83f5-bfb0e46139cf","Type":"ContainerStarted","Data":"7fd28a6cc89de71595f847ed5ee607106e543a3ee2a0ee12afe7233180501aa3"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.695206 4926 scope.go:117] "RemoveContainer" containerID="2ad57f255b3a7c599d030c35520841e08c4cce4188725f9f8b75179bd1e32703" Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.706962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5db9fd79b9-99khc" event={"ID":"3842d4ae-3cdd-48ad-a374-dbf807481f6f","Type":"ContainerStarted","Data":"78ef09e4d0c16f82b3a7ffcc99915223669b000b1a4bf04b673938399f4b4253"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.708771 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"436e3ae7-0a2a-4cba-9416-804f6cba9b26","Type":"ContainerStarted","Data":"62e9dcbf8248aa4ba9ae9a05cacba814d20531c6ebaaef834248d23e003e6701"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.724904 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="8fb216ae-643a-47e9-9dce-b3f13e633e95" containerID="8ada8dfca4122a68a26eab3b2528f50d4151e5f176da9b7e3d0b762df6b777bf" exitCode=0 Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.724989 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fm7m4" event={"ID":"8fb216ae-643a-47e9-9dce-b3f13e633e95","Type":"ContainerDied","Data":"8ada8dfca4122a68a26eab3b2528f50d4151e5f176da9b7e3d0b762df6b777bf"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.732414 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a61c-account-create-update-ntxbc" event={"ID":"68f7c172-f0a2-43f2-b4bc-5be36758ea34","Type":"ContainerStarted","Data":"b9e656e40828957f3e314d844290a61b121c84d81d45910f23c8e2ad4686b925"} Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.747650 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-bbf6864d-nswqp"] Nov 25 18:33:19 crc kubenswrapper[4926]: I1125 18:33:19.764514 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-bbf6864d-nswqp"] Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.026383 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 18:33:20 crc kubenswrapper[4926]: W1125 18:33:20.036491 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod332b4639_2578_4039_b9da_cbef6b683a2a.slice/crio-f00580b82f10a6e4c1862a83ab4b1e348d5f1ce5f0479a91ea240ea506e0350d WatchSource:0}: Error finding container f00580b82f10a6e4c1862a83ab4b1e348d5f1ce5f0479a91ea240ea506e0350d: Status 404 returned error can't find the container with id f00580b82f10a6e4c1862a83ab4b1e348d5f1ce5f0479a91ea240ea506e0350d Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.037907 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.345227 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2add9e5e-d863-4ecc-9778-c932b0532956" path="/var/lib/kubelet/pods/2add9e5e-d863-4ecc-9778-c932b0532956/volumes" Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.346005 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42900e2c-5c74-417f-a60c-6955d8c0fc29" path="/var/lib/kubelet/pods/42900e2c-5c74-417f-a60c-6955d8c0fc29/volumes" Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.347391 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5c499cb-2dc4-4d3d-9bcd-4933f8bace55" path="/var/lib/kubelet/pods/b5c499cb-2dc4-4d3d-9bcd-4933f8bace55/volumes" Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.348067 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d317a89a-d9e1-4a1c-8019-cf3cb7f517c4" path="/var/lib/kubelet/pods/d317a89a-d9e1-4a1c-8019-cf3cb7f517c4/volumes" Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.581769 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.754862 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad6a0baa-57a5-47d8-81fc-4395a6f4079a","Type":"ContainerStarted","Data":"aed393714dc1c51e9f8d78106054ac88975fa5e8a79ed66c109699188665b668"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.759353 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5db9fd79b9-99khc" 
event={"ID":"3842d4ae-3cdd-48ad-a374-dbf807481f6f","Type":"ContainerStarted","Data":"ddef70c0e23244a67068e7866c92ad32f822bf2f7dba8540fbe9a0eba6f78dde"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.759414 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5db9fd79b9-99khc" event={"ID":"3842d4ae-3cdd-48ad-a374-dbf807481f6f","Type":"ContainerStarted","Data":"c34aae55cc3379774df3f340ca66202a572e4f2dd924829ea7f4d3f0c3e962bc"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.760457 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.760493 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.763533 4926 generic.go:334] "Generic (PLEG): container finished" podID="68f7c172-f0a2-43f2-b4bc-5be36758ea34" containerID="2e114e6cf51eddfab79d846bda93db3dd4e9df7d4df99254d7315a73c35a1bb5" exitCode=0 Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.763623 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a61c-account-create-update-ntxbc" event={"ID":"68f7c172-f0a2-43f2-b4bc-5be36758ea34","Type":"ContainerDied","Data":"2e114e6cf51eddfab79d846bda93db3dd4e9df7d4df99254d7315a73c35a1bb5"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.769844 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerStarted","Data":"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.769877 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerStarted","Data":"f00580b82f10a6e4c1862a83ab4b1e348d5f1ce5f0479a91ea240ea506e0350d"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.774271 4926 generic.go:334] "Generic (PLEG): container finished" podID="79206e63-62c7-45e0-93f3-f30a28d822bb" containerID="514a25c0dc7dec4829311eed50cd9a6abe27efd9f7bace46ea9fdd86b82befe2" exitCode=0 Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.774312 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2sw5v" event={"ID":"79206e63-62c7-45e0-93f3-f30a28d822bb","Type":"ContainerDied","Data":"514a25c0dc7dec4829311eed50cd9a6abe27efd9f7bace46ea9fdd86b82befe2"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.777744 4926 generic.go:334] "Generic (PLEG): container finished" podID="46c8d0a4-afda-4df7-83f5-bfb0e46139cf" containerID="167a96f77e78d17987983c6d21fc209c668b8d6c21bbc3bb084514d729b4c871" exitCode=0 Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.777778 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ldk4s" event={"ID":"46c8d0a4-afda-4df7-83f5-bfb0e46139cf","Type":"ContainerDied","Data":"167a96f77e78d17987983c6d21fc209c668b8d6c21bbc3bb084514d729b4c871"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.782211 4926 generic.go:334] "Generic (PLEG): container finished" podID="aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" containerID="df7a15ec5fbc6c93d8391912c0832ea1c8d22088a01fd7c658fa1f7b298090cc" exitCode=0 Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.782385 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" event={"ID":"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0","Type":"ContainerDied","Data":"df7a15ec5fbc6c93d8391912c0832ea1c8d22088a01fd7c658fa1f7b298090cc"} Nov 25 18:33:20 crc kubenswrapper[4926]: I1125 18:33:20.807165 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5db9fd79b9-99khc" podStartSLOduration=13.807145125 podStartE2EDuration="13.807145125s" podCreationTimestamp="2025-11-25 18:33:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:20.782842278 +0000 UTC m=+1231.168355893" watchObservedRunningTime="2025-11-25 18:33:20.807145125 +0000 UTC m=+1231.192658730" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.515756 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.561624 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.614774 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mlw2\" (UniqueName: \"kubernetes.io/projected/8fb216ae-643a-47e9-9dce-b3f13e633e95-kube-api-access-7mlw2\") pod \"8fb216ae-643a-47e9-9dce-b3f13e633e95\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.614825 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fb216ae-643a-47e9-9dce-b3f13e633e95-operator-scripts\") pod \"8fb216ae-643a-47e9-9dce-b3f13e633e95\" (UID: \"8fb216ae-643a-47e9-9dce-b3f13e633e95\") " Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.614889 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rl7g\" (UniqueName: \"kubernetes.io/projected/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-kube-api-access-7rl7g\") pod \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.615130 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-operator-scripts\") pod \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\" (UID: \"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d\") " Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.616298 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" (UID: "ae1b2a5c-29d8-42a3-aedc-eb296da03c2d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.620668 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fb216ae-643a-47e9-9dce-b3f13e633e95-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8fb216ae-643a-47e9-9dce-b3f13e633e95" (UID: "8fb216ae-643a-47e9-9dce-b3f13e633e95"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.627782 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-kube-api-access-7rl7g" (OuterVolumeSpecName: "kube-api-access-7rl7g") pod "ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" (UID: "ae1b2a5c-29d8-42a3-aedc-eb296da03c2d"). InnerVolumeSpecName "kube-api-access-7rl7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.628618 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fb216ae-643a-47e9-9dce-b3f13e633e95-kube-api-access-7mlw2" (OuterVolumeSpecName: "kube-api-access-7mlw2") pod "8fb216ae-643a-47e9-9dce-b3f13e633e95" (UID: "8fb216ae-643a-47e9-9dce-b3f13e633e95"). InnerVolumeSpecName "kube-api-access-7mlw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.717322 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mlw2\" (UniqueName: \"kubernetes.io/projected/8fb216ae-643a-47e9-9dce-b3f13e633e95-kube-api-access-7mlw2\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.717356 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fb216ae-643a-47e9-9dce-b3f13e633e95-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.717394 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rl7g\" (UniqueName: \"kubernetes.io/projected/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-kube-api-access-7rl7g\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.717405 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.800121 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad6a0baa-57a5-47d8-81fc-4395a6f4079a","Type":"ContainerStarted","Data":"0a7e3c5015ab61589ea8cdadcbaa25bb631eee2e4e6925edac60aad9d8bc695a"} Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.806544 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fm7m4" event={"ID":"8fb216ae-643a-47e9-9dce-b3f13e633e95","Type":"ContainerDied","Data":"478b392f22efedfd9a006a8f70d5d74cbd460a7d9a524e08cd42cd1cd6989294"} Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.806574 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="478b392f22efedfd9a006a8f70d5d74cbd460a7d9a524e08cd42cd1cd6989294" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.806623 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-fm7m4" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.825777 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cda2-account-create-update-njssm" event={"ID":"ae1b2a5c-29d8-42a3-aedc-eb296da03c2d","Type":"ContainerDied","Data":"9f42f1af9063e6d1c93f06029ee15d7d0876dff6a786c318bbce3b8b97042005"} Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.826069 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f42f1af9063e6d1c93f06029ee15d7d0876dff6a786c318bbce3b8b97042005" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.826203 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cda2-account-create-update-njssm" Nov 25 18:33:21 crc kubenswrapper[4926]: I1125 18:33:21.835921 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerStarted","Data":"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"} Nov 25 18:33:21 crc kubenswrapper[4926]: E1125 18:33:21.991341 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae1b2a5c_29d8_42a3_aedc_eb296da03c2d.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.143685 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.247900 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68f7c172-f0a2-43f2-b4bc-5be36758ea34-operator-scripts\") pod \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.248437 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-np58f\" (UniqueName: \"kubernetes.io/projected/68f7c172-f0a2-43f2-b4bc-5be36758ea34-kube-api-access-np58f\") pod \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\" (UID: \"68f7c172-f0a2-43f2-b4bc-5be36758ea34\") " Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.253448 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68f7c172-f0a2-43f2-b4bc-5be36758ea34-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "68f7c172-f0a2-43f2-b4bc-5be36758ea34" (UID: "68f7c172-f0a2-43f2-b4bc-5be36758ea34"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.253552 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68f7c172-f0a2-43f2-b4bc-5be36758ea34-kube-api-access-np58f" (OuterVolumeSpecName: "kube-api-access-np58f") pod "68f7c172-f0a2-43f2-b4bc-5be36758ea34" (UID: "68f7c172-f0a2-43f2-b4bc-5be36758ea34"). InnerVolumeSpecName "kube-api-access-np58f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.350526 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-np58f\" (UniqueName: \"kubernetes.io/projected/68f7c172-f0a2-43f2-b4bc-5be36758ea34-kube-api-access-np58f\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.350562 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68f7c172-f0a2-43f2-b4bc-5be36758ea34-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.846694 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ad6a0baa-57a5-47d8-81fc-4395a6f4079a","Type":"ContainerStarted","Data":"bf77f28d049454b521aab5705d29942afe758e8922ea2f9f565328881d278e1b"} Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.849962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a61c-account-create-update-ntxbc" event={"ID":"68f7c172-f0a2-43f2-b4bc-5be36758ea34","Type":"ContainerDied","Data":"b9e656e40828957f3e314d844290a61b121c84d81d45910f23c8e2ad4686b925"} Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.850011 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9e656e40828957f3e314d844290a61b121c84d81d45910f23c8e2ad4686b925" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.850080 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a61c-account-create-update-ntxbc" Nov 25 18:33:22 crc kubenswrapper[4926]: I1125 18:33:22.871587 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.871566241 podStartE2EDuration="4.871566241s" podCreationTimestamp="2025-11-25 18:33:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:22.864577239 +0000 UTC m=+1233.250090904" watchObservedRunningTime="2025-11-25 18:33:22.871566241 +0000 UTC m=+1233.257079846" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.330864 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.338549 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.345999 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.384607 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v6qr\" (UniqueName: \"kubernetes.io/projected/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-kube-api-access-8v6qr\") pod \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.384711 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-operator-scripts\") pod \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\" (UID: \"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0\") " Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.384876 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znzwx\" (UniqueName: \"kubernetes.io/projected/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-kube-api-access-znzwx\") pod \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.384918 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-operator-scripts\") pod \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\" (UID: \"46c8d0a4-afda-4df7-83f5-bfb0e46139cf\") " Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.387004 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46c8d0a4-afda-4df7-83f5-bfb0e46139cf" (UID: "46c8d0a4-afda-4df7-83f5-bfb0e46139cf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.387062 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" (UID: "aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.397560 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-kube-api-access-8v6qr" (OuterVolumeSpecName: "kube-api-access-8v6qr") pod "aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" (UID: "aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0"). InnerVolumeSpecName "kube-api-access-8v6qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.416644 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-kube-api-access-znzwx" (OuterVolumeSpecName: "kube-api-access-znzwx") pod "46c8d0a4-afda-4df7-83f5-bfb0e46139cf" (UID: "46c8d0a4-afda-4df7-83f5-bfb0e46139cf"). InnerVolumeSpecName "kube-api-access-znzwx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.486960 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79206e63-62c7-45e0-93f3-f30a28d822bb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "79206e63-62c7-45e0-93f3-f30a28d822bb" (UID: "79206e63-62c7-45e0-93f3-f30a28d822bb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.487591 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79206e63-62c7-45e0-93f3-f30a28d822bb-operator-scripts\") pod \"79206e63-62c7-45e0-93f3-f30a28d822bb\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.487824 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2xck\" (UniqueName: \"kubernetes.io/projected/79206e63-62c7-45e0-93f3-f30a28d822bb-kube-api-access-h2xck\") pod \"79206e63-62c7-45e0-93f3-f30a28d822bb\" (UID: \"79206e63-62c7-45e0-93f3-f30a28d822bb\") " Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.488423 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znzwx\" (UniqueName: \"kubernetes.io/projected/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-kube-api-access-znzwx\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.488440 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c8d0a4-afda-4df7-83f5-bfb0e46139cf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.488452 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v6qr\" (UniqueName: \"kubernetes.io/projected/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-kube-api-access-8v6qr\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.488461 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.488470 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79206e63-62c7-45e0-93f3-f30a28d822bb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.493592 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79206e63-62c7-45e0-93f3-f30a28d822bb-kube-api-access-h2xck" (OuterVolumeSpecName: "kube-api-access-h2xck") pod "79206e63-62c7-45e0-93f3-f30a28d822bb" (UID: "79206e63-62c7-45e0-93f3-f30a28d822bb"). InnerVolumeSpecName "kube-api-access-h2xck". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.590517 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2xck\" (UniqueName: \"kubernetes.io/projected/79206e63-62c7-45e0-93f3-f30a28d822bb-kube-api-access-h2xck\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.875522 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2sw5v" event={"ID":"79206e63-62c7-45e0-93f3-f30a28d822bb","Type":"ContainerDied","Data":"2b8d3e9987a23338b615b6ea5e4344f8edb0b92dbf831f6ab96c26addfb426f4"} Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.875581 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b8d3e9987a23338b615b6ea5e4344f8edb0b92dbf831f6ab96c26addfb426f4" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.875671 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2sw5v" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.880041 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerStarted","Data":"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"} Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.882251 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ldk4s" event={"ID":"46c8d0a4-afda-4df7-83f5-bfb0e46139cf","Type":"ContainerDied","Data":"7fd28a6cc89de71595f847ed5ee607106e543a3ee2a0ee12afe7233180501aa3"} Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.882283 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7fd28a6cc89de71595f847ed5ee607106e543a3ee2a0ee12afe7233180501aa3" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.882342 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ldk4s" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.883676 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.883711 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ca6b-account-create-update-9rj6z" event={"ID":"aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0","Type":"ContainerDied","Data":"0446a33cabee2a3eb6ccfed77112fb9548d216f4af3fc4ac6187f90acde6ade2"} Nov 25 18:33:23 crc kubenswrapper[4926]: I1125 18:33:23.883728 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0446a33cabee2a3eb6ccfed77112fb9548d216f4af3fc4ac6187f90acde6ade2" Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.343253 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.904324 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerStarted","Data":"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"} Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.904486 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-central-agent" containerID="cri-o://f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24" gracePeriod=30 Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.904917 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="proxy-httpd" containerID="cri-o://cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" gracePeriod=30 Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.904945 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-notification-agent" containerID="cri-o://1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e" gracePeriod=30 Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.905008 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="sg-core" containerID="cri-o://969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" gracePeriod=30 Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.914565 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.914890 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.915365 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:24 crc kubenswrapper[4926]: E1125 18:33:24.915654 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(9f664100-2926-4e80-a06e-5c09021eb736)\"" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" Nov 25 18:33:24 crc kubenswrapper[4926]: I1125 18:33:24.934625 4926 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.544081577 podStartE2EDuration="6.934606026s" podCreationTimestamp="2025-11-25 18:33:18 +0000 UTC" firstStartedPulling="2025-11-25 18:33:20.038378564 +0000 UTC m=+1230.423892169" lastFinishedPulling="2025-11-25 18:33:24.428903013 +0000 UTC m=+1234.814416618" observedRunningTime="2025-11-25 18:33:24.932061083 +0000 UTC m=+1235.317574728" watchObservedRunningTime="2025-11-25 18:33:24.934606026 +0000 UTC m=+1235.320119631" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.210409 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nxqr8"] Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.210870 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb216ae-643a-47e9-9dce-b3f13e633e95" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.210907 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb216ae-643a-47e9-9dce-b3f13e633e95" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.210922 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79206e63-62c7-45e0-93f3-f30a28d822bb" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.210929 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="79206e63-62c7-45e0-93f3-f30a28d822bb" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.210944 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c8d0a4-afda-4df7-83f5-bfb0e46139cf" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.210952 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c8d0a4-afda-4df7-83f5-bfb0e46139cf" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.210982 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.210990 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.211002 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68f7c172-f0a2-43f2-b4bc-5be36758ea34" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211009 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="68f7c172-f0a2-43f2-b4bc-5be36758ea34" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.211019 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211025 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211211 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fb216ae-643a-47e9-9dce-b3f13e633e95" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211225 4926 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211247 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="79206e63-62c7-45e0-93f3-f30a28d822bb" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211255 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c8d0a4-afda-4df7-83f5-bfb0e46139cf" containerName="mariadb-database-create" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211264 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="68f7c172-f0a2-43f2-b4bc-5be36758ea34" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211272 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" containerName="mariadb-account-create-update" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.211973 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.218119 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nxqr8"] Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.218295 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-bsltd" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.218307 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.218478 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.330911 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-scripts\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.330988 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-config-data\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.331066 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqpmc\" (UniqueName: \"kubernetes.io/projected/6986f889-6366-45eb-8f6a-e52588461c3e-kube-api-access-sqpmc\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.331119 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 
18:33:25.432480 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-scripts\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.432526 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-config-data\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.432593 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqpmc\" (UniqueName: \"kubernetes.io/projected/6986f889-6366-45eb-8f6a-e52588461c3e-kube-api-access-sqpmc\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.432645 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.444925 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-scripts\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.445077 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.449648 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-config-data\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.472495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqpmc\" (UniqueName: \"kubernetes.io/projected/6986f889-6366-45eb-8f6a-e52588461c3e-kube-api-access-sqpmc\") pod \"nova-cell0-conductor-db-sync-nxqr8\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.543096 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.875763 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918469 4926 generic.go:334] "Generic (PLEG): container finished" podID="332b4639-2578-4039-b9da-cbef6b683a2a" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" exitCode=0 Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918501 4926 generic.go:334] "Generic (PLEG): container finished" podID="332b4639-2578-4039-b9da-cbef6b683a2a" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" exitCode=2 Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918511 4926 generic.go:334] "Generic (PLEG): container finished" podID="332b4639-2578-4039-b9da-cbef6b683a2a" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e" exitCode=0 Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918518 4926 generic.go:334] "Generic (PLEG): container finished" podID="332b4639-2578-4039-b9da-cbef6b683a2a" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24" exitCode=0 Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918566 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerDied","Data":"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"} Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918614 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerDied","Data":"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"} Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918625 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerDied","Data":"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"} Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918624 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918648 4926 scope.go:117] "RemoveContainer" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918634 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerDied","Data":"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"} Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.918840 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332b4639-2578-4039-b9da-cbef6b683a2a","Type":"ContainerDied","Data":"f00580b82f10a6e4c1862a83ab4b1e348d5f1ce5f0479a91ea240ea506e0350d"} Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.919152 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:25 crc kubenswrapper[4926]: E1125 18:33:25.919500 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(9f664100-2926-4e80-a06e-5c09021eb736)\"" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946120 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-sg-core-conf-yaml\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946253 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-config-data\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946317 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-scripts\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946350 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-run-httpd\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946484 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlzsw\" (UniqueName: \"kubernetes.io/projected/332b4639-2578-4039-b9da-cbef6b683a2a-kube-api-access-vlzsw\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946530 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-log-httpd\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 
18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.946551 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-combined-ca-bundle\") pod \"332b4639-2578-4039-b9da-cbef6b683a2a\" (UID: \"332b4639-2578-4039-b9da-cbef6b683a2a\") " Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.949551 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.950406 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.953224 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-scripts" (OuterVolumeSpecName: "scripts") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.953276 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/332b4639-2578-4039-b9da-cbef6b683a2a-kube-api-access-vlzsw" (OuterVolumeSpecName: "kube-api-access-vlzsw") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "kube-api-access-vlzsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.958875 4926 scope.go:117] "RemoveContainer" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" Nov 25 18:33:25 crc kubenswrapper[4926]: I1125 18:33:25.987459 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.000010 4926 scope.go:117] "RemoveContainer" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.033285 4926 scope.go:117] "RemoveContainer" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.048878 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlzsw\" (UniqueName: \"kubernetes.io/projected/332b4639-2578-4039-b9da-cbef6b683a2a-kube-api-access-vlzsw\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.048907 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.048916 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.048927 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.048935 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332b4639-2578-4039-b9da-cbef6b683a2a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.054567 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nxqr8"] Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.062719 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.073167 4926 scope.go:117] "RemoveContainer" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.077124 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": container with ID starting with cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da not found: ID does not exist" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.077187 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"} err="failed to get container status \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": rpc error: code = NotFound desc = could not find container \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": container with ID starting with cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.077223 4926 scope.go:117] "RemoveContainer" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" Nov 25 18:33:26 crc kubenswrapper[4926]: W1125 18:33:26.077590 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6986f889_6366_45eb_8f6a_e52588461c3e.slice/crio-74df1ce7ad776c4fe3873d8959b18dd3353720fdffbd0d8d9214075aa41b818f WatchSource:0}: Error finding container 74df1ce7ad776c4fe3873d8959b18dd3353720fdffbd0d8d9214075aa41b818f: Status 404 returned error can't find the container with id 74df1ce7ad776c4fe3873d8959b18dd3353720fdffbd0d8d9214075aa41b818f Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.078460 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": container with ID starting with 969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb not found: ID does not exist" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.078494 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"} err="failed to get container status \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": rpc error: code = NotFound desc = could not find container \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": container with ID starting with 969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.078526 4926 scope.go:117] "RemoveContainer" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e" Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.078802 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": container with ID starting with 
1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e not found: ID does not exist" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.078824 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"} err="failed to get container status \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": rpc error: code = NotFound desc = could not find container \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": container with ID starting with 1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.078837 4926 scope.go:117] "RemoveContainer" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24" Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.080121 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": container with ID starting with f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24 not found: ID does not exist" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.080169 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"} err="failed to get container status \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": rpc error: code = NotFound desc = could not find container \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": container with ID starting with f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24 not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.080195 4926 scope.go:117] "RemoveContainer" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.084027 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"} err="failed to get container status \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": rpc error: code = NotFound desc = could not find container \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": container with ID starting with cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.084052 4926 scope.go:117] "RemoveContainer" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.086357 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"} err="failed to get container status \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": rpc error: code = NotFound desc = could not find container \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": container with ID starting with 969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb not found: ID does not exist" Nov 25 
18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.086404 4926 scope.go:117] "RemoveContainer" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.087602 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-config-data" (OuterVolumeSpecName: "config-data") pod "332b4639-2578-4039-b9da-cbef6b683a2a" (UID: "332b4639-2578-4039-b9da-cbef6b683a2a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.088687 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"} err="failed to get container status \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": rpc error: code = NotFound desc = could not find container \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": container with ID starting with 1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.088743 4926 scope.go:117] "RemoveContainer" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.089160 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"} err="failed to get container status \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": rpc error: code = NotFound desc = could not find container \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": container with ID starting with f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24 not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.089207 4926 scope.go:117] "RemoveContainer" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.089567 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"} err="failed to get container status \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": rpc error: code = NotFound desc = could not find container \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": container with ID starting with cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.089609 4926 scope.go:117] "RemoveContainer" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.090084 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"} err="failed to get container status \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": rpc error: code = NotFound desc = could not find container \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": container with ID starting with 969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb not found: ID does not exist" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 
18:33:26.090117 4926 scope.go:117] "RemoveContainer" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.090403 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"} err="failed to get container status \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": rpc error: code = NotFound desc = could not find container \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": container with ID starting with 1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e not found: ID does not exist"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.090431 4926 scope.go:117] "RemoveContainer" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.090640 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"} err="failed to get container status \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": rpc error: code = NotFound desc = could not find container \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": container with ID starting with f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24 not found: ID does not exist"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.090671 4926 scope.go:117] "RemoveContainer" containerID="cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.090973 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da"} err="failed to get container status \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": rpc error: code = NotFound desc = could not find container \"cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da\": container with ID starting with cfcc8d2d27076eb0e87fc9dcbe8fab77da0ff7d8d10c7c1c9e9e6eab313f47da not found: ID does not exist"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.091011 4926 scope.go:117] "RemoveContainer" containerID="969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.091286 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb"} err="failed to get container status \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": rpc error: code = NotFound desc = could not find container \"969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb\": container with ID starting with 969f7c28add464a097168dc9bf2a1b9a1998225e3926531761587d8552ce8beb not found: ID does not exist"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.091318 4926 scope.go:117] "RemoveContainer" containerID="1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.091649 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e"} err="failed to get container status \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": rpc error: code = NotFound desc = could not find container \"1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e\": container with ID starting with 1aaee4b14b455ddcdadd08a21b4a8727e6f72ea103e5c8e27b15e6612215748e not found: ID does not exist"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.091683 4926 scope.go:117] "RemoveContainer" containerID="f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.092971 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24"} err="failed to get container status \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": rpc error: code = NotFound desc = could not find container \"f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24\": container with ID starting with f990f9564a7507fc4260c81096250cf8d855673b464ca116f4e0bf9fd548fa24 not found: ID does not exist"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.150631 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.150973 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332b4639-2578-4039-b9da-cbef6b683a2a-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.258531 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.268565 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.290413 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.290867 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-central-agent"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.290886 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-central-agent"
Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.290908 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="sg-core"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.290916 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="sg-core"
Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.290927 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-notification-agent"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.290933 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-notification-agent"
Nov 25 18:33:26 crc kubenswrapper[4926]: E1125 18:33:26.290946 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="proxy-httpd"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.290952 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="proxy-httpd"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.291154 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-notification-agent"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.291169 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="sg-core"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.291177 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="proxy-httpd"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.291193 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" containerName="ceilometer-central-agent"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.292934 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.295480 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.295656 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.301475 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.341139 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="332b4639-2578-4039-b9da-cbef6b683a2a" path="/var/lib/kubelet/pods/332b4639-2578-4039-b9da-cbef6b683a2a/volumes"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.353861 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-scripts\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.353906 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-run-httpd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.353938 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.353967 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppjkd\" (UniqueName: \"kubernetes.io/projected/3153a465-758c-4abd-bd9c-2087ab1eebd0-kube-api-access-ppjkd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.354031 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-config-data\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.354074 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.354105 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-log-httpd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455411 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-config-data\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455462 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455489 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-log-httpd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455561 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-scripts\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455606 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-run-httpd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.455669 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppjkd\" (UniqueName: \"kubernetes.io/projected/3153a465-758c-4abd-bd9c-2087ab1eebd0-kube-api-access-ppjkd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.456111 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-log-httpd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.456173 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-run-httpd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.461070 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-scripts\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.461816 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.462419 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.465490 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-config-data\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.483031 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppjkd\" (UniqueName: \"kubernetes.io/projected/3153a465-758c-4abd-bd9c-2087ab1eebd0-kube-api-access-ppjkd\") pod \"ceilometer-0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " pod="openstack/ceilometer-0"
Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.621624 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:26 crc kubenswrapper[4926]: I1125 18:33:26.932806 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" event={"ID":"6986f889-6366-45eb-8f6a-e52588461c3e","Type":"ContainerStarted","Data":"74df1ce7ad776c4fe3873d8959b18dd3353720fdffbd0d8d9214075aa41b818f"} Nov 25 18:33:27 crc kubenswrapper[4926]: I1125 18:33:27.071406 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:27 crc kubenswrapper[4926]: W1125 18:33:27.075365 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3153a465_758c_4abd_bd9c_2087ab1eebd0.slice/crio-9fba75915aea19fb1ebd9d2654220cbc81efe66b740661d04beddca142194156 WatchSource:0}: Error finding container 9fba75915aea19fb1ebd9d2654220cbc81efe66b740661d04beddca142194156: Status 404 returned error can't find the container with id 9fba75915aea19fb1ebd9d2654220cbc81efe66b740661d04beddca142194156 Nov 25 18:33:27 crc kubenswrapper[4926]: I1125 18:33:27.950623 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerStarted","Data":"33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798"} Nov 25 18:33:27 crc kubenswrapper[4926]: I1125 18:33:27.950987 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerStarted","Data":"3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8"} Nov 25 18:33:27 crc kubenswrapper[4926]: I1125 18:33:27.951011 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerStarted","Data":"9fba75915aea19fb1ebd9d2654220cbc81efe66b740661d04beddca142194156"} Nov 25 18:33:27 crc kubenswrapper[4926]: I1125 18:33:27.952926 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:27 crc kubenswrapper[4926]: I1125 18:33:27.954811 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5db9fd79b9-99khc" Nov 25 18:33:28 crc kubenswrapper[4926]: I1125 18:33:28.970862 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerStarted","Data":"250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d"} Nov 25 18:33:29 crc kubenswrapper[4926]: I1125 18:33:29.503421 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 18:33:29 crc kubenswrapper[4926]: I1125 18:33:29.990085 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerStarted","Data":"b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee"} Nov 25 18:33:29 crc kubenswrapper[4926]: I1125 18:33:29.990236 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:33:30 crc kubenswrapper[4926]: I1125 18:33:30.016550 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.547777849 podStartE2EDuration="4.016524583s" podCreationTimestamp="2025-11-25 18:33:26 +0000 UTC" 
firstStartedPulling="2025-11-25 18:33:27.078522648 +0000 UTC m=+1237.464036243" lastFinishedPulling="2025-11-25 18:33:29.547269372 +0000 UTC m=+1239.932782977" observedRunningTime="2025-11-25 18:33:30.007638644 +0000 UTC m=+1240.393152249" watchObservedRunningTime="2025-11-25 18:33:30.016524583 +0000 UTC m=+1240.402038188" Nov 25 18:33:30 crc kubenswrapper[4926]: I1125 18:33:30.251313 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:32 crc kubenswrapper[4926]: I1125 18:33:32.012881 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-central-agent" containerID="cri-o://3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8" gracePeriod=30 Nov 25 18:33:32 crc kubenswrapper[4926]: I1125 18:33:32.012939 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="sg-core" containerID="cri-o://250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d" gracePeriod=30 Nov 25 18:33:32 crc kubenswrapper[4926]: I1125 18:33:32.012919 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="proxy-httpd" containerID="cri-o://b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee" gracePeriod=30 Nov 25 18:33:32 crc kubenswrapper[4926]: I1125 18:33:32.012984 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-notification-agent" containerID="cri-o://33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798" gracePeriod=30 Nov 25 18:33:32 crc kubenswrapper[4926]: E1125 18:33:32.287844 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3153a465_758c_4abd_bd9c_2087ab1eebd0.slice/crio-conmon-b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3153a465_758c_4abd_bd9c_2087ab1eebd0.slice/crio-b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee.scope\": RecentStats: unable to find data in memory cache]" Nov 25 18:33:33 crc kubenswrapper[4926]: I1125 18:33:33.025645 4926 generic.go:334] "Generic (PLEG): container finished" podID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerID="b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee" exitCode=0 Nov 25 18:33:33 crc kubenswrapper[4926]: I1125 18:33:33.025901 4926 generic.go:334] "Generic (PLEG): container finished" podID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerID="250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d" exitCode=2 Nov 25 18:33:33 crc kubenswrapper[4926]: I1125 18:33:33.025910 4926 generic.go:334] "Generic (PLEG): container finished" podID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerID="33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798" exitCode=0 Nov 25 18:33:33 crc kubenswrapper[4926]: I1125 18:33:33.025729 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerDied","Data":"b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee"} Nov 25 18:33:33 crc kubenswrapper[4926]: I1125 18:33:33.025964 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerDied","Data":"250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d"} Nov 25 18:33:33 crc kubenswrapper[4926]: I1125 18:33:33.025979 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerDied","Data":"33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798"} Nov 25 18:33:35 crc kubenswrapper[4926]: I1125 18:33:35.773760 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:33:35 crc kubenswrapper[4926]: I1125 18:33:35.776068 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-log" containerID="cri-o://ac46ce64187e0857cfede484e0b57b8c0d0af2323862d2086ca42f29b7e939e5" gracePeriod=30 Nov 25 18:33:35 crc kubenswrapper[4926]: I1125 18:33:35.776247 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-httpd" containerID="cri-o://1ab43ba372074d6a02b0f693a9b2a90660d4d8877b41f35e8307d4e28bc52f2c" gracePeriod=30 Nov 25 18:33:36 crc kubenswrapper[4926]: I1125 18:33:36.065167 4926 generic.go:334] "Generic (PLEG): container finished" podID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerID="ac46ce64187e0857cfede484e0b57b8c0d0af2323862d2086ca42f29b7e939e5" exitCode=143 Nov 25 18:33:36 crc kubenswrapper[4926]: I1125 18:33:36.065212 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"18b3f2f7-c24a-4cbe-af19-4a124a7b393e","Type":"ContainerDied","Data":"ac46ce64187e0857cfede484e0b57b8c0d0af2323862d2086ca42f29b7e939e5"} Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.029862 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.030594 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-log" containerID="cri-o://52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca" gracePeriod=30 Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.030977 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-httpd" containerID="cri-o://dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819" gracePeriod=30 Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.098534 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" event={"ID":"6986f889-6366-45eb-8f6a-e52588461c3e","Type":"ContainerStarted","Data":"4e87aad84ec26afd36c0bba5c402fb5a163f9511a289b417105126661b4feeb4"} Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.126613 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerID="1ab43ba372074d6a02b0f693a9b2a90660d4d8877b41f35e8307d4e28bc52f2c" exitCode=0 Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.126666 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"18b3f2f7-c24a-4cbe-af19-4a124a7b393e","Type":"ContainerDied","Data":"1ab43ba372074d6a02b0f693a9b2a90660d4d8877b41f35e8307d4e28bc52f2c"} Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.135534 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" podStartSLOduration=1.605899404 podStartE2EDuration="12.135516902s" podCreationTimestamp="2025-11-25 18:33:25 +0000 UTC" firstStartedPulling="2025-11-25 18:33:26.084032782 +0000 UTC m=+1236.469546387" lastFinishedPulling="2025-11-25 18:33:36.61365028 +0000 UTC m=+1246.999163885" observedRunningTime="2025-11-25 18:33:37.130651161 +0000 UTC m=+1247.516164766" watchObservedRunningTime="2025-11-25 18:33:37.135516902 +0000 UTC m=+1247.521030507" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.255740 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.396914 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-scripts\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397026 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-public-tls-certs\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397074 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-httpd-run\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397097 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-config-data\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397153 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-logs\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397318 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8nx8\" (UniqueName: \"kubernetes.io/projected/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-kube-api-access-p8nx8\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397359 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.397638 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-combined-ca-bundle\") pod \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\" (UID: \"18b3f2f7-c24a-4cbe-af19-4a124a7b393e\") " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.398071 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.398277 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-logs" (OuterVolumeSpecName: "logs") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.398795 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.398811 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.425804 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-scripts" (OuterVolumeSpecName: "scripts") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.425803 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.436405 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.453760 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-kube-api-access-p8nx8" (OuterVolumeSpecName: "kube-api-access-p8nx8") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "kube-api-access-p8nx8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.487281 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-config-data" (OuterVolumeSpecName: "config-data") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.503820 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8nx8\" (UniqueName: \"kubernetes.io/projected/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-kube-api-access-p8nx8\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.503872 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.503887 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.503900 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.503911 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.546531 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "18b3f2f7-c24a-4cbe-af19-4a124a7b393e" (UID: "18b3f2f7-c24a-4cbe-af19-4a124a7b393e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.567856 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.606786 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.606823 4926 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18b3f2f7-c24a-4cbe-af19-4a124a7b393e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:37 crc kubenswrapper[4926]: I1125 18:33:37.989692 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123046 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-run-httpd\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123113 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-combined-ca-bundle\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123178 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-scripts\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123222 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-sg-core-conf-yaml\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123252 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppjkd\" (UniqueName: \"kubernetes.io/projected/3153a465-758c-4abd-bd9c-2087ab1eebd0-kube-api-access-ppjkd\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123319 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-log-httpd\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123406 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-config-data\") pod \"3153a465-758c-4abd-bd9c-2087ab1eebd0\" (UID: \"3153a465-758c-4abd-bd9c-2087ab1eebd0\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.123884 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.124219 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.131932 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3153a465-758c-4abd-bd9c-2087ab1eebd0-kube-api-access-ppjkd" (OuterVolumeSpecName: "kube-api-access-ppjkd") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "kube-api-access-ppjkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.132476 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-scripts" (OuterVolumeSpecName: "scripts") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.163606 4926 generic.go:334] "Generic (PLEG): container finished" podID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerID="3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8" exitCode=0 Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.163730 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.163751 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerDied","Data":"3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8"} Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.163792 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3153a465-758c-4abd-bd9c-2087ab1eebd0","Type":"ContainerDied","Data":"9fba75915aea19fb1ebd9d2654220cbc81efe66b740661d04beddca142194156"} Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.163815 4926 scope.go:117] "RemoveContainer" containerID="b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.170662 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"18b3f2f7-c24a-4cbe-af19-4a124a7b393e","Type":"ContainerDied","Data":"6458834468da38cef67554ca0958ca0d3c4b9c0653024cbf500ef0abaf350fab"} Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.170963 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.175357 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.176899 4926 generic.go:334] "Generic (PLEG): container finished" podID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerID="52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca" exitCode=143 Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.177447 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841","Type":"ContainerDied","Data":"52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca"} Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.226197 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.226229 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.226237 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.226247 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppjkd\" (UniqueName: \"kubernetes.io/projected/3153a465-758c-4abd-bd9c-2087ab1eebd0-kube-api-access-ppjkd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.226257 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3153a465-758c-4abd-bd9c-2087ab1eebd0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.246528 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.263471 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-config-data" (OuterVolumeSpecName: "config-data") pod "3153a465-758c-4abd-bd9c-2087ab1eebd0" (UID: "3153a465-758c-4abd-bd9c-2087ab1eebd0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.327843 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.327885 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3153a465-758c-4abd-bd9c-2087ab1eebd0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.434445 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.451647 4926 scope.go:117] "RemoveContainer" containerID="250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.453822 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.463527 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.464013 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="proxy-httpd" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464028 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="proxy-httpd" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.464038 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-notification-agent" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464045 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-notification-agent" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.464063 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="sg-core" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464070 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="sg-core" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.464095 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-central-agent" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464102 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-central-agent" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.464114 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-httpd" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464121 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-httpd" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.464134 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-log" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464139 4926 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-log" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464322 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-central-agent" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464340 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-httpd" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464350 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="proxy-httpd" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464361 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" containerName="glance-log" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464384 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="ceilometer-notification-agent" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.464394 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" containerName="sg-core" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.465467 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.468920 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.469086 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.493926 4926 scope.go:117] "RemoveContainer" containerID="33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.494120 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538050 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538100 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-logs\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538142 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-scripts\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538177 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jmw6\" 
(UniqueName: \"kubernetes.io/projected/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-kube-api-access-8jmw6\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538242 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538275 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-config-data\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538327 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538364 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.538824 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.550177 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.551285 4926 scope.go:117] "RemoveContainer" containerID="3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.557230 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.559796 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.564791 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.565026 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.566443 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.615603 4926 scope.go:117] "RemoveContainer" containerID="b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.616642 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee\": container with ID starting with b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee not found: ID does not exist" containerID="b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.616692 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee"} err="failed to get container status \"b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee\": rpc error: code = NotFound desc = could not find container \"b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee\": container with ID starting with b31368e38d0bf9eb3ce9ebee4c796c3b629a14cf3ba644ece1931397485063ee not found: ID does not exist" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.616713 4926 scope.go:117] "RemoveContainer" containerID="250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.617057 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d\": container with ID starting with 250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d not found: ID does not exist" containerID="250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.617077 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d"} err="failed to get container status \"250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d\": rpc error: code = NotFound desc = could not find container \"250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d\": container with ID starting with 250b15dc6d9c2f118f72721ba83424ae822445206734609f6b5514a35fd4109d not found: ID does not exist" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.617089 4926 scope.go:117] "RemoveContainer" containerID="33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.617406 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798\": container with ID starting with 33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798 not found: ID 
does not exist" containerID="33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.617424 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798"} err="failed to get container status \"33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798\": rpc error: code = NotFound desc = could not find container \"33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798\": container with ID starting with 33e3483e23b6482bcc4c8a329677a965f24f13aeacaba7490d9798d069d5d798 not found: ID does not exist" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.617438 4926 scope.go:117] "RemoveContainer" containerID="3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8" Nov 25 18:33:38 crc kubenswrapper[4926]: E1125 18:33:38.617769 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8\": container with ID starting with 3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8 not found: ID does not exist" containerID="3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.617789 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8"} err="failed to get container status \"3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8\": rpc error: code = NotFound desc = could not find container \"3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8\": container with ID starting with 3c8683486f5a4683755b4549aaedba2a0dc42a127ea54b124e6e1d630722e4c8 not found: ID does not exist" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.617801 4926 scope.go:117] "RemoveContainer" containerID="1ab43ba372074d6a02b0f693a9b2a90660d4d8877b41f35e8307d4e28bc52f2c" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645702 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-log-httpd\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645747 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p9w4\" (UniqueName: \"kubernetes.io/projected/2f779cf8-e6fe-4830-a95f-a807179a659e-kube-api-access-4p9w4\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645791 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645822 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: 
\"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645839 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-logs\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645870 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-scripts\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645887 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-config-data\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645907 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jmw6\" (UniqueName: \"kubernetes.io/projected/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-kube-api-access-8jmw6\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645926 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645954 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.645982 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-config-data\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.646024 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.646047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 
crc kubenswrapper[4926]: I1125 18:33:38.646064 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-scripts\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.646086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-run-httpd\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.650951 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.651387 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.651639 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-logs\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.654477 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-scripts\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.654956 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.655055 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-config-data\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.662147 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.671385 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jmw6\" (UniqueName: 
\"kubernetes.io/projected/115acdd7-4a4c-420d-9dea-6e821cbf8bc9-kube-api-access-8jmw6\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.691250 4926 scope.go:117] "RemoveContainer" containerID="ac46ce64187e0857cfede484e0b57b8c0d0af2323862d2086ca42f29b7e939e5" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748267 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-scripts\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748491 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-run-httpd\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748518 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-log-httpd\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748573 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p9w4\" (UniqueName: \"kubernetes.io/projected/2f779cf8-e6fe-4830-a95f-a807179a659e-kube-api-access-4p9w4\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748636 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.748691 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-config-data\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.749821 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-run-httpd\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.750129 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-log-httpd\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" 
Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.751602 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"115acdd7-4a4c-420d-9dea-6e821cbf8bc9\") " pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.753414 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.761127 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-scripts\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.763701 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-config-data\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.764792 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.774161 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p9w4\" (UniqueName: \"kubernetes.io/projected/2f779cf8-e6fe-4830-a95f-a807179a659e-kube-api-access-4p9w4\") pod \"ceilometer-0\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.782821 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.783646 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.788901 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.817282 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.954731 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-combined-ca-bundle\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.954798 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-logs\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.954861 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-scripts\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.954894 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-internal-tls-certs\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.954957 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-config-data\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.954993 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.955011 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-httpd-run\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.955075 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77jh5\" (UniqueName: \"kubernetes.io/projected/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-kube-api-access-77jh5\") pod \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\" (UID: \"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841\") " Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.957024 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.957441 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-logs" (OuterVolumeSpecName: "logs") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.961626 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.963515 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-scripts" (OuterVolumeSpecName: "scripts") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:38 crc kubenswrapper[4926]: I1125 18:33:38.969586 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-kube-api-access-77jh5" (OuterVolumeSpecName: "kube-api-access-77jh5") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "kube-api-access-77jh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.012767 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.044493 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-config-data" (OuterVolumeSpecName: "config-data") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058395 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058425 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058446 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058821 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058844 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77jh5\" (UniqueName: \"kubernetes.io/projected/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-kube-api-access-77jh5\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058857 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.058866 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.070014 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" (UID: "8cbf5bf6-4cf7-42bd-8f71-89237d6dc841"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.112833 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.160865 4926 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.160912 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.214877 4926 generic.go:334] "Generic (PLEG): container finished" podID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerID="dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819" exitCode=0 Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.215030 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.215719 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841","Type":"ContainerDied","Data":"dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819"} Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.215756 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8cbf5bf6-4cf7-42bd-8f71-89237d6dc841","Type":"ContainerDied","Data":"c3e55a393cfa0ddf7b8bccf4e039290d6e8e6719b33e2cb232c4706ede868ccd"} Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.215776 4926 scope.go:117] "RemoveContainer" containerID="dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.271426 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.272649 4926 scope.go:117] "RemoveContainer" containerID="52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.284448 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.308099 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:33:39 crc kubenswrapper[4926]: E1125 18:33:39.308672 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-log" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.308685 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-log" Nov 25 18:33:39 crc kubenswrapper[4926]: E1125 18:33:39.308712 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-httpd" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.308718 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-httpd" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.308922 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-httpd" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.308948 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" containerName="glance-log" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.310186 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.315397 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.315791 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.320251 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.332282 4926 scope.go:117] "RemoveContainer" containerID="dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819" Nov 25 18:33:39 crc kubenswrapper[4926]: E1125 18:33:39.334233 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819\": container with ID starting with dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819 not found: ID does not exist" containerID="dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.334280 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819"} err="failed to get container status \"dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819\": rpc error: code = NotFound desc = could not find container \"dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819\": container with ID starting with dd51888b1b9775d327317200979f54f65cf89732d7ecd292717d00b2203f1819 not found: ID does not exist" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.334309 4926 scope.go:117] "RemoveContainer" containerID="52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca" Nov 25 18:33:39 crc kubenswrapper[4926]: E1125 18:33:39.334678 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca\": container with ID starting with 52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca not found: ID does not exist" containerID="52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.334720 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca"} err="failed to get container status \"52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca\": rpc error: code = NotFound desc = could not find container \"52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca\": container with ID starting with 52eb26ff3e9cdf01e587efc7c19046ad40b6977b731dc67c6016f175286d04ca not found: ID does not exist" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.335294 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.380790 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:39 crc kubenswrapper[4926]: W1125 18:33:39.382563 4926 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f779cf8_e6fe_4830_a95f_a807179a659e.slice/crio-602b7f186003dc7e986ebd7bd04de62705775eae9ea094baab5a99ab92a67ab2 WatchSource:0}: Error finding container 602b7f186003dc7e986ebd7bd04de62705775eae9ea094baab5a99ab92a67ab2: Status 404 returned error can't find the container with id 602b7f186003dc7e986ebd7bd04de62705775eae9ea094baab5a99ab92a67ab2 Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.472426 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.472670 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.472820 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.473017 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1d50ae6-b28d-4604-94dd-81987f5b63fa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.473191 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.473290 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1d50ae6-b28d-4604-94dd-81987f5b63fa-logs\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.473408 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.473567 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwtjl\" (UniqueName: \"kubernetes.io/projected/f1d50ae6-b28d-4604-94dd-81987f5b63fa-kube-api-access-qwtjl\") pod \"glance-default-internal-api-0\" (UID: 
\"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.574877 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.574925 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.574953 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.574992 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1d50ae6-b28d-4604-94dd-81987f5b63fa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.575046 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.575063 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1d50ae6-b28d-4604-94dd-81987f5b63fa-logs\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.575079 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.575111 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwtjl\" (UniqueName: \"kubernetes.io/projected/f1d50ae6-b28d-4604-94dd-81987f5b63fa-kube-api-access-qwtjl\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.578898 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1d50ae6-b28d-4604-94dd-81987f5b63fa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc 
kubenswrapper[4926]: I1125 18:33:39.579585 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.580922 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1d50ae6-b28d-4604-94dd-81987f5b63fa-logs\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.585631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.592320 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.612624 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.621346 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwtjl\" (UniqueName: \"kubernetes.io/projected/f1d50ae6-b28d-4604-94dd-81987f5b63fa-kube-api-access-qwtjl\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.625611 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d50ae6-b28d-4604-94dd-81987f5b63fa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.649135 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.674464 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"f1d50ae6-b28d-4604-94dd-81987f5b63fa\") " pod="openstack/glance-default-internal-api-0" Nov 25 18:33:39 crc kubenswrapper[4926]: I1125 18:33:39.952470 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.250933 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerStarted","Data":"e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792"} Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.255668 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerStarted","Data":"337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be"} Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.255689 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerStarted","Data":"602b7f186003dc7e986ebd7bd04de62705775eae9ea094baab5a99ab92a67ab2"} Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.257572 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"115acdd7-4a4c-420d-9dea-6e821cbf8bc9","Type":"ContainerStarted","Data":"877bd11a841ee8e7cd761e34f56de6542fa5c25b0c016875dd7427c276a851d2"} Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.373661 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18b3f2f7-c24a-4cbe-af19-4a124a7b393e" path="/var/lib/kubelet/pods/18b3f2f7-c24a-4cbe-af19-4a124a7b393e/volumes" Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.374752 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3153a465-758c-4abd-bd9c-2087ab1eebd0" path="/var/lib/kubelet/pods/3153a465-758c-4abd-bd9c-2087ab1eebd0/volumes" Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.376100 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cbf5bf6-4cf7-42bd-8f71-89237d6dc841" path="/var/lib/kubelet/pods/8cbf5bf6-4cf7-42bd-8f71-89237d6dc841/volumes" Nov 25 18:33:40 crc kubenswrapper[4926]: I1125 18:33:40.842574 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 18:33:41 crc kubenswrapper[4926]: I1125 18:33:41.283855 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f1d50ae6-b28d-4604-94dd-81987f5b63fa","Type":"ContainerStarted","Data":"4f22703cc5b63187038ad90c635843418bf8ac92aa0f260d20484fd4e0990765"} Nov 25 18:33:41 crc kubenswrapper[4926]: I1125 18:33:41.311031 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerStarted","Data":"7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1"} Nov 25 18:33:41 crc kubenswrapper[4926]: I1125 18:33:41.311078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerStarted","Data":"f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c"} Nov 25 18:33:41 crc kubenswrapper[4926]: I1125 18:33:41.316246 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"115acdd7-4a4c-420d-9dea-6e821cbf8bc9","Type":"ContainerStarted","Data":"bfb5d3bec0cba25d9d663eb10086e945c335b9dfdf84776c1e3cfd7bd420f1b1"} Nov 25 18:33:42 crc kubenswrapper[4926]: I1125 18:33:42.340524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/glance-default-external-api-0" event={"ID":"115acdd7-4a4c-420d-9dea-6e821cbf8bc9","Type":"ContainerStarted","Data":"e199731a54edb13b6016c9b95e7b6bc6027f0a21c1ad903d1c8b7a6b7dd336a9"} Nov 25 18:33:42 crc kubenswrapper[4926]: I1125 18:33:42.341278 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f1d50ae6-b28d-4604-94dd-81987f5b63fa","Type":"ContainerStarted","Data":"81c73e1984b68946a39d7db21ac0a77ba5bc6c294649776f89fb971d530d4727"} Nov 25 18:33:42 crc kubenswrapper[4926]: I1125 18:33:42.365634 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.365608053 podStartE2EDuration="4.365608053s" podCreationTimestamp="2025-11-25 18:33:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:42.352126469 +0000 UTC m=+1252.737640074" watchObservedRunningTime="2025-11-25 18:33:42.365608053 +0000 UTC m=+1252.751121668" Nov 25 18:33:43 crc kubenswrapper[4926]: I1125 18:33:43.343640 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f1d50ae6-b28d-4604-94dd-81987f5b63fa","Type":"ContainerStarted","Data":"66732fdfdce424162e257f1303cec1e8f004b3f7dc972b1138ff5404767adaaf"} Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.356078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerStarted","Data":"02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d"} Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.356473 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.356193 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="sg-core" containerID="cri-o://7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1" gracePeriod=30 Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.356126 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-central-agent" containerID="cri-o://337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be" gracePeriod=30 Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.356242 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-notification-agent" containerID="cri-o://f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c" gracePeriod=30 Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.356242 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="proxy-httpd" containerID="cri-o://02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d" gracePeriod=30 Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.382168 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.382148307 podStartE2EDuration="5.382148307s" podCreationTimestamp="2025-11-25 
18:33:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:44.377263566 +0000 UTC m=+1254.762777171" watchObservedRunningTime="2025-11-25 18:33:44.382148307 +0000 UTC m=+1254.767661902" Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.419826 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.41663961 podStartE2EDuration="6.419801698s" podCreationTimestamp="2025-11-25 18:33:38 +0000 UTC" firstStartedPulling="2025-11-25 18:33:39.396016337 +0000 UTC m=+1249.781529942" lastFinishedPulling="2025-11-25 18:33:43.399178425 +0000 UTC m=+1253.784692030" observedRunningTime="2025-11-25 18:33:44.412333394 +0000 UTC m=+1254.797846999" watchObservedRunningTime="2025-11-25 18:33:44.419801698 +0000 UTC m=+1254.805315293" Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.915120 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:44 crc kubenswrapper[4926]: I1125 18:33:44.948925 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.367762 4926 generic.go:334] "Generic (PLEG): container finished" podID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerID="02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d" exitCode=0 Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.368318 4926 generic.go:334] "Generic (PLEG): container finished" podID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerID="7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1" exitCode=2 Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.368336 4926 generic.go:334] "Generic (PLEG): container finished" podID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerID="f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c" exitCode=0 Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.367835 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerDied","Data":"02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d"} Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.368418 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerDied","Data":"7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1"} Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.368441 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerDied","Data":"f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c"} Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.368568 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:45 crc kubenswrapper[4926]: I1125 18:33:45.410258 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.311767 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.377399 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p9w4\" (UniqueName: \"kubernetes.io/projected/2f779cf8-e6fe-4830-a95f-a807179a659e-kube-api-access-4p9w4\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.377897 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-log-httpd\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.377939 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-run-httpd\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.377984 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-sg-core-conf-yaml\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.378057 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-combined-ca-bundle\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.378084 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-config-data\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.378271 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-scripts\") pod \"2f779cf8-e6fe-4830-a95f-a807179a659e\" (UID: \"2f779cf8-e6fe-4830-a95f-a807179a659e\") " Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.381290 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.381480 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.384603 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f779cf8-e6fe-4830-a95f-a807179a659e-kube-api-access-4p9w4" (OuterVolumeSpecName: "kube-api-access-4p9w4") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "kube-api-access-4p9w4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.386757 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-scripts" (OuterVolumeSpecName: "scripts") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.410788 4926 generic.go:334] "Generic (PLEG): container finished" podID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerID="337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be" exitCode=0 Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.410849 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerDied","Data":"337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be"} Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.410896 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f779cf8-e6fe-4830-a95f-a807179a659e","Type":"ContainerDied","Data":"602b7f186003dc7e986ebd7bd04de62705775eae9ea094baab5a99ab92a67ab2"} Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.410903 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.410921 4926 scope.go:117] "RemoveContainer" containerID="02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.422285 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.481808 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p9w4\" (UniqueName: \"kubernetes.io/projected/2f779cf8-e6fe-4830-a95f-a807179a659e-kube-api-access-4p9w4\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.481926 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.481938 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f779cf8-e6fe-4830-a95f-a807179a659e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.481947 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.481958 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.503020 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.503773 4926 scope.go:117] "RemoveContainer" containerID="7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.514643 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-config-data" (OuterVolumeSpecName: "config-data") pod "2f779cf8-e6fe-4830-a95f-a807179a659e" (UID: "2f779cf8-e6fe-4830-a95f-a807179a659e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.527149 4926 scope.go:117] "RemoveContainer" containerID="f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.556229 4926 scope.go:117] "RemoveContainer" containerID="337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.582906 4926 scope.go:117] "RemoveContainer" containerID="02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.583236 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.583257 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f779cf8-e6fe-4830-a95f-a807179a659e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.583508 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d\": container with ID starting with 02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d not found: ID does not exist" containerID="02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.583559 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d"} err="failed to get container status \"02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d\": rpc error: code = NotFound desc = could not find container \"02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d\": container with ID starting with 02b239207a1faba45a6e76bacce9d2f55b3802b0226c87982fbf8d8993c3b84d not found: ID does not exist" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.583590 4926 scope.go:117] "RemoveContainer" containerID="7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.584101 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1\": container with ID starting with 7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1 not found: ID does not exist" containerID="7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.584142 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1"} err="failed to get container status \"7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1\": rpc error: code = NotFound desc = could not find container \"7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1\": container with ID starting with 7b9c1142b4f19bbb5848d7a2f52a4b9889e1ca994b2521ece1c9fb0ce1c44dc1 not found: ID does not exist" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.584168 4926 scope.go:117] "RemoveContainer" 
containerID="f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.585009 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c\": container with ID starting with f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c not found: ID does not exist" containerID="f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.585059 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c"} err="failed to get container status \"f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c\": rpc error: code = NotFound desc = could not find container \"f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c\": container with ID starting with f325a461d9367a81d86323ca55094f84ddb31dde4c798e197bd8a696b6fd069c not found: ID does not exist" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.585091 4926 scope.go:117] "RemoveContainer" containerID="337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.585452 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be\": container with ID starting with 337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be not found: ID does not exist" containerID="337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.585484 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be"} err="failed to get container status \"337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be\": rpc error: code = NotFound desc = could not find container \"337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be\": container with ID starting with 337d4151cfa0d4e9faa1a49d01379721c5915879516e80c6d54b83a60d8974be not found: ID does not exist" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.686864 4926 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod2add9e5e-d863-4ecc-9778-c932b0532956"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod2add9e5e-d863-4ecc-9778-c932b0532956] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2add9e5e_d863_4ecc_9778_c932b0532956.slice" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.752621 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.769154 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.780962 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.781487 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-central-agent" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781519 4926 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-central-agent" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.781561 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="sg-core" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781570 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="sg-core" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.781584 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-notification-agent" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781595 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-notification-agent" Nov 25 18:33:48 crc kubenswrapper[4926]: E1125 18:33:48.781631 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="proxy-httpd" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781645 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="proxy-httpd" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781911 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-central-agent" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781947 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="sg-core" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781973 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="proxy-httpd" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.781996 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" containerName="ceilometer-notification-agent" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.784401 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.788734 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.790313 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.790494 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.790822 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.790947 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.834431 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.843036 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.887215 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2x6m\" (UniqueName: \"kubernetes.io/projected/7bd8598b-0150-4e47-92b7-5988a2640401-kube-api-access-x2x6m\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.887271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-log-httpd\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.887323 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-run-httpd\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.887563 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.887721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-config-data\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.887874 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-scripts\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 
18:33:48.887915 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.996888 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.996993 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-config-data\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.997068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-scripts\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.997101 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.997163 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2x6m\" (UniqueName: \"kubernetes.io/projected/7bd8598b-0150-4e47-92b7-5988a2640401-kube-api-access-x2x6m\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.997211 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-log-httpd\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.997280 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-run-httpd\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:48 crc kubenswrapper[4926]: I1125 18:33:48.998166 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-log-httpd\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:48.998660 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-run-httpd\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.002409 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.003412 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-scripts\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.013740 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-config-data\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.015035 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.020588 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2x6m\" (UniqueName: \"kubernetes.io/projected/7bd8598b-0150-4e47-92b7-5988a2640401-kube-api-access-x2x6m\") pod \"ceilometer-0\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.125252 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.402551 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:49 crc kubenswrapper[4926]: W1125 18:33:49.405279 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7bd8598b_0150_4e47_92b7_5988a2640401.slice/crio-3d0e5462eb3ee18d3a07de39f902272e356491d67370345e0ffe4bc72b8a42a0 WatchSource:0}: Error finding container 3d0e5462eb3ee18d3a07de39f902272e356491d67370345e0ffe4bc72b8a42a0: Status 404 returned error can't find the container with id 3d0e5462eb3ee18d3a07de39f902272e356491d67370345e0ffe4bc72b8a42a0 Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.420212 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerStarted","Data":"3d0e5462eb3ee18d3a07de39f902272e356491d67370345e0ffe4bc72b8a42a0"} Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.423310 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.423347 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.953068 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.953432 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:49 crc kubenswrapper[4926]: I1125 18:33:49.986573 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:50 crc kubenswrapper[4926]: I1125 18:33:50.003694 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:50 crc kubenswrapper[4926]: I1125 18:33:50.385916 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f779cf8-e6fe-4830-a95f-a807179a659e" path="/var/lib/kubelet/pods/2f779cf8-e6fe-4830-a95f-a807179a659e/volumes" Nov 25 18:33:50 crc kubenswrapper[4926]: I1125 18:33:50.441964 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerStarted","Data":"5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea"} Nov 25 18:33:50 crc kubenswrapper[4926]: I1125 18:33:50.441999 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerStarted","Data":"38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901"} Nov 25 18:33:50 crc kubenswrapper[4926]: I1125 18:33:50.444669 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:50 crc kubenswrapper[4926]: I1125 18:33:50.444691 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:51 crc kubenswrapper[4926]: I1125 18:33:51.470897 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerStarted","Data":"3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2"} Nov 25 18:33:51 crc kubenswrapper[4926]: I1125 18:33:51.570090 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 18:33:51 crc kubenswrapper[4926]: I1125 18:33:51.570209 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 18:33:51 crc kubenswrapper[4926]: I1125 18:33:51.688265 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 18:33:52 crc kubenswrapper[4926]: I1125 18:33:52.415002 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:52 crc kubenswrapper[4926]: I1125 18:33:52.415554 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 18:33:52 crc kubenswrapper[4926]: I1125 18:33:52.527627 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerStarted","Data":"624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d"} Nov 25 18:33:52 crc kubenswrapper[4926]: I1125 18:33:52.528146 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:33:52 crc kubenswrapper[4926]: I1125 18:33:52.551646 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.933441138 podStartE2EDuration="4.551628726s" podCreationTimestamp="2025-11-25 18:33:48 +0000 UTC" firstStartedPulling="2025-11-25 18:33:49.407096366 +0000 UTC m=+1259.792609971" lastFinishedPulling="2025-11-25 18:33:52.025283964 +0000 UTC m=+1262.410797559" observedRunningTime="2025-11-25 18:33:52.547526215 +0000 UTC m=+1262.933039820" watchObservedRunningTime="2025-11-25 18:33:52.551628726 +0000 UTC m=+1262.937142331" Nov 25 18:33:54 crc kubenswrapper[4926]: I1125 18:33:54.547482 4926 generic.go:334] "Generic (PLEG): container finished" podID="6986f889-6366-45eb-8f6a-e52588461c3e" containerID="4e87aad84ec26afd36c0bba5c402fb5a163f9511a289b417105126661b4feeb4" exitCode=0 Nov 25 18:33:54 crc kubenswrapper[4926]: I1125 18:33:54.547863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" event={"ID":"6986f889-6366-45eb-8f6a-e52588461c3e","Type":"ContainerDied","Data":"4e87aad84ec26afd36c0bba5c402fb5a163f9511a289b417105126661b4feeb4"} Nov 25 18:33:55 crc kubenswrapper[4926]: I1125 18:33:55.354966 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:33:55 crc kubenswrapper[4926]: I1125 18:33:55.355207 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" containerID="cri-o://e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792" gracePeriod=30 Nov 25 18:33:55 crc kubenswrapper[4926]: I1125 18:33:55.915459 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.035590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-scripts\") pod \"6986f889-6366-45eb-8f6a-e52588461c3e\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.035738 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqpmc\" (UniqueName: \"kubernetes.io/projected/6986f889-6366-45eb-8f6a-e52588461c3e-kube-api-access-sqpmc\") pod \"6986f889-6366-45eb-8f6a-e52588461c3e\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.035850 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-config-data\") pod \"6986f889-6366-45eb-8f6a-e52588461c3e\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.035886 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-combined-ca-bundle\") pod \"6986f889-6366-45eb-8f6a-e52588461c3e\" (UID: \"6986f889-6366-45eb-8f6a-e52588461c3e\") " Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.041834 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-scripts" (OuterVolumeSpecName: "scripts") pod "6986f889-6366-45eb-8f6a-e52588461c3e" (UID: "6986f889-6366-45eb-8f6a-e52588461c3e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.041866 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6986f889-6366-45eb-8f6a-e52588461c3e-kube-api-access-sqpmc" (OuterVolumeSpecName: "kube-api-access-sqpmc") pod "6986f889-6366-45eb-8f6a-e52588461c3e" (UID: "6986f889-6366-45eb-8f6a-e52588461c3e"). InnerVolumeSpecName "kube-api-access-sqpmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.075222 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-config-data" (OuterVolumeSpecName: "config-data") pod "6986f889-6366-45eb-8f6a-e52588461c3e" (UID: "6986f889-6366-45eb-8f6a-e52588461c3e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.080333 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6986f889-6366-45eb-8f6a-e52588461c3e" (UID: "6986f889-6366-45eb-8f6a-e52588461c3e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.138137 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.138182 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqpmc\" (UniqueName: \"kubernetes.io/projected/6986f889-6366-45eb-8f6a-e52588461c3e-kube-api-access-sqpmc\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.138197 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.138209 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6986f889-6366-45eb-8f6a-e52588461c3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.565341 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" event={"ID":"6986f889-6366-45eb-8f6a-e52588461c3e","Type":"ContainerDied","Data":"74df1ce7ad776c4fe3873d8959b18dd3353720fdffbd0d8d9214075aa41b818f"} Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.565761 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74df1ce7ad776c4fe3873d8959b18dd3353720fdffbd0d8d9214075aa41b818f" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.565451 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nxqr8" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.683751 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 18:33:56 crc kubenswrapper[4926]: E1125 18:33:56.684222 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6986f889-6366-45eb-8f6a-e52588461c3e" containerName="nova-cell0-conductor-db-sync" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.684247 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6986f889-6366-45eb-8f6a-e52588461c3e" containerName="nova-cell0-conductor-db-sync" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.685611 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6986f889-6366-45eb-8f6a-e52588461c3e" containerName="nova-cell0-conductor-db-sync" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.686519 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.695517 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.695538 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-bsltd" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.695583 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.748335 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c274cca-be43-418d-9c50-adf2a19334e7-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.748450 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c274cca-be43-418d-9c50-adf2a19334e7-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.748511 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtdvw\" (UniqueName: \"kubernetes.io/projected/3c274cca-be43-418d-9c50-adf2a19334e7-kube-api-access-mtdvw\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.849896 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtdvw\" (UniqueName: \"kubernetes.io/projected/3c274cca-be43-418d-9c50-adf2a19334e7-kube-api-access-mtdvw\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.849995 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c274cca-be43-418d-9c50-adf2a19334e7-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.850089 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c274cca-be43-418d-9c50-adf2a19334e7-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.854931 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c274cca-be43-418d-9c50-adf2a19334e7-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.857232 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c274cca-be43-418d-9c50-adf2a19334e7-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:56 crc kubenswrapper[4926]: I1125 18:33:56.875463 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtdvw\" (UniqueName: \"kubernetes.io/projected/3c274cca-be43-418d-9c50-adf2a19334e7-kube-api-access-mtdvw\") pod \"nova-cell0-conductor-0\" (UID: \"3c274cca-be43-418d-9c50-adf2a19334e7\") " pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.003798 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.284671 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.285159 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-central-agent" containerID="cri-o://38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901" gracePeriod=30 Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.285278 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="sg-core" containerID="cri-o://3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2" gracePeriod=30 Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.285316 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-notification-agent" containerID="cri-o://5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea" gracePeriod=30 Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.285357 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="proxy-httpd" containerID="cri-o://624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d" gracePeriod=30 Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.512237 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.602574 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3c274cca-be43-418d-9c50-adf2a19334e7","Type":"ContainerStarted","Data":"ef1c8c1bd3e85d153a1f3afe1bfa12f483c63a1908885605748c3dc2d8c6dad2"} Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.621050 4926 generic.go:334] "Generic (PLEG): container finished" podID="7bd8598b-0150-4e47-92b7-5988a2640401" containerID="624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d" exitCode=0 Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.621097 4926 generic.go:334] "Generic (PLEG): container finished" podID="7bd8598b-0150-4e47-92b7-5988a2640401" containerID="3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2" exitCode=2 Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.621117 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerDied","Data":"624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d"} Nov 25 18:33:57 crc kubenswrapper[4926]: I1125 18:33:57.621143 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerDied","Data":"3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2"} Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.445458 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.481231 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jq72\" (UniqueName: \"kubernetes.io/projected/9f664100-2926-4e80-a06e-5c09021eb736-kube-api-access-4jq72\") pod \"9f664100-2926-4e80-a06e-5c09021eb736\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.481374 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-config-data\") pod \"9f664100-2926-4e80-a06e-5c09021eb736\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.481430 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f664100-2926-4e80-a06e-5c09021eb736-logs\") pod \"9f664100-2926-4e80-a06e-5c09021eb736\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.481483 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-combined-ca-bundle\") pod \"9f664100-2926-4e80-a06e-5c09021eb736\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.481576 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-custom-prometheus-ca\") pod \"9f664100-2926-4e80-a06e-5c09021eb736\" (UID: \"9f664100-2926-4e80-a06e-5c09021eb736\") " Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.490931 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f664100-2926-4e80-a06e-5c09021eb736-logs" (OuterVolumeSpecName: "logs") pod "9f664100-2926-4e80-a06e-5c09021eb736" (UID: "9f664100-2926-4e80-a06e-5c09021eb736"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.496167 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f664100-2926-4e80-a06e-5c09021eb736-kube-api-access-4jq72" (OuterVolumeSpecName: "kube-api-access-4jq72") pod "9f664100-2926-4e80-a06e-5c09021eb736" (UID: "9f664100-2926-4e80-a06e-5c09021eb736"). InnerVolumeSpecName "kube-api-access-4jq72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.514037 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "9f664100-2926-4e80-a06e-5c09021eb736" (UID: "9f664100-2926-4e80-a06e-5c09021eb736"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.525911 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f664100-2926-4e80-a06e-5c09021eb736" (UID: "9f664100-2926-4e80-a06e-5c09021eb736"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.554249 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-config-data" (OuterVolumeSpecName: "config-data") pod "9f664100-2926-4e80-a06e-5c09021eb736" (UID: "9f664100-2926-4e80-a06e-5c09021eb736"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.588771 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.588814 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f664100-2926-4e80-a06e-5c09021eb736-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.588827 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.588842 4926 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/9f664100-2926-4e80-a06e-5c09021eb736-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.588853 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jq72\" (UniqueName: \"kubernetes.io/projected/9f664100-2926-4e80-a06e-5c09021eb736-kube-api-access-4jq72\") on node \"crc\" DevicePath \"\"" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.633139 4926 generic.go:334] "Generic (PLEG): container finished" podID="7bd8598b-0150-4e47-92b7-5988a2640401" containerID="5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea" exitCode=0 Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.633228 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerDied","Data":"5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea"} Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.635774 4926 generic.go:334] "Generic (PLEG): container finished" podID="9f664100-2926-4e80-a06e-5c09021eb736" containerID="e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792" exitCode=0 Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.635912 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerDied","Data":"e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792"} Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.635984 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" 
event={"ID":"9f664100-2926-4e80-a06e-5c09021eb736","Type":"ContainerDied","Data":"11eb4fa6a1b17809506f3d2b5eeba6a3c4b1011fa8100da7126a7bab8c986efe"} Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.636012 4926 scope.go:117] "RemoveContainer" containerID="e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.635909 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.638213 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3c274cca-be43-418d-9c50-adf2a19334e7","Type":"ContainerStarted","Data":"007c391a40d96abf191a726ec9dcab62b984cd522b998c8d666ecb0b485bb6b2"} Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.639636 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.659759 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.659743645 podStartE2EDuration="2.659743645s" podCreationTimestamp="2025-11-25 18:33:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:33:58.657694394 +0000 UTC m=+1269.043207999" watchObservedRunningTime="2025-11-25 18:33:58.659743645 +0000 UTC m=+1269.045257250" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.673556 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.680358 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.687586 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700001 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:33:58 crc kubenswrapper[4926]: E1125 18:33:58.700510 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700528 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: E1125 18:33:58.700558 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700566 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: E1125 18:33:58.700580 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700587 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700875 4926 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700897 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.700910 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.701566 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.706288 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.714297 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.745488 4926 scope.go:117] "RemoveContainer" containerID="e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792" Nov 25 18:33:58 crc kubenswrapper[4926]: E1125 18:33:58.747640 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792\": container with ID starting with e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792 not found: ID does not exist" containerID="e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.747684 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792"} err="failed to get container status \"e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792\": rpc error: code = NotFound desc = could not find container \"e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792\": container with ID starting with e236878287b0314716956c674f42b1061bd709140f8b39fa557f555ac2806792 not found: ID does not exist" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.747721 4926 scope.go:117] "RemoveContainer" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:58 crc kubenswrapper[4926]: E1125 18:33:58.747994 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187\": container with ID starting with 56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187 not found: ID does not exist" containerID="56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.748021 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187"} err="failed to get container status \"56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187\": rpc error: code = NotFound desc = could not find container \"56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187\": container with ID starting with 56ebd15d7b344fffd93a9c0dba9ea3afdbe47abb2c57cf34f4386bf0369a1187 not found: ID does not exist" Nov 25 18:33:58 
crc kubenswrapper[4926]: I1125 18:33:58.795460 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-config-data\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.795669 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29d2e355-bed6-4dab-98c3-e2dc1134d327-logs\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.795768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.795835 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.796054 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sfb9\" (UniqueName: \"kubernetes.io/projected/29d2e355-bed6-4dab-98c3-e2dc1134d327-kube-api-access-9sfb9\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.898527 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29d2e355-bed6-4dab-98c3-e2dc1134d327-logs\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.898031 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29d2e355-bed6-4dab-98c3-e2dc1134d327-logs\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.898677 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.899539 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.899679 
4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sfb9\" (UniqueName: \"kubernetes.io/projected/29d2e355-bed6-4dab-98c3-e2dc1134d327-kube-api-access-9sfb9\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.899806 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-config-data\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.904718 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.907673 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.908498 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29d2e355-bed6-4dab-98c3-e2dc1134d327-config-data\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:58 crc kubenswrapper[4926]: I1125 18:33:58.928912 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sfb9\" (UniqueName: \"kubernetes.io/projected/29d2e355-bed6-4dab-98c3-e2dc1134d327-kube-api-access-9sfb9\") pod \"watcher-decision-engine-0\" (UID: \"29d2e355-bed6-4dab-98c3-e2dc1134d327\") " pod="openstack/watcher-decision-engine-0" Nov 25 18:33:59 crc kubenswrapper[4926]: I1125 18:33:59.024012 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 25 18:33:59 crc kubenswrapper[4926]: I1125 18:33:59.495149 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 25 18:33:59 crc kubenswrapper[4926]: W1125 18:33:59.500358 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29d2e355_bed6_4dab_98c3_e2dc1134d327.slice/crio-06ab964564f9a98baeb4a37acc53a854fb50dc69c9aae2c3b54ec9d30b5d631d WatchSource:0}: Error finding container 06ab964564f9a98baeb4a37acc53a854fb50dc69c9aae2c3b54ec9d30b5d631d: Status 404 returned error can't find the container with id 06ab964564f9a98baeb4a37acc53a854fb50dc69c9aae2c3b54ec9d30b5d631d Nov 25 18:33:59 crc kubenswrapper[4926]: I1125 18:33:59.650389 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"29d2e355-bed6-4dab-98c3-e2dc1134d327","Type":"ContainerStarted","Data":"06ab964564f9a98baeb4a37acc53a854fb50dc69c9aae2c3b54ec9d30b5d631d"} Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.339293 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f664100-2926-4e80-a06e-5c09021eb736" path="/var/lib/kubelet/pods/9f664100-2926-4e80-a06e-5c09021eb736/volumes" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.402325 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.527670 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2x6m\" (UniqueName: \"kubernetes.io/projected/7bd8598b-0150-4e47-92b7-5988a2640401-kube-api-access-x2x6m\") pod \"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.527750 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-log-httpd\") pod \"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.527791 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-sg-core-conf-yaml\") pod \"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.527842 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-config-data\") pod \"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.527938 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-combined-ca-bundle\") pod \"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.527960 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-scripts\") pod 
\"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.528030 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-run-httpd\") pod \"7bd8598b-0150-4e47-92b7-5988a2640401\" (UID: \"7bd8598b-0150-4e47-92b7-5988a2640401\") " Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.528937 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.529536 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.532593 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-scripts" (OuterVolumeSpecName: "scripts") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.535009 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bd8598b-0150-4e47-92b7-5988a2640401-kube-api-access-x2x6m" (OuterVolumeSpecName: "kube-api-access-x2x6m") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "kube-api-access-x2x6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.560828 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.621471 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.630711 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2x6m\" (UniqueName: \"kubernetes.io/projected/7bd8598b-0150-4e47-92b7-5988a2640401-kube-api-access-x2x6m\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.630739 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.630751 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.630761 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.630769 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.630777 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bd8598b-0150-4e47-92b7-5988a2640401-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.643771 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-config-data" (OuterVolumeSpecName: "config-data") pod "7bd8598b-0150-4e47-92b7-5988a2640401" (UID: "7bd8598b-0150-4e47-92b7-5988a2640401"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.661860 4926 generic.go:334] "Generic (PLEG): container finished" podID="7bd8598b-0150-4e47-92b7-5988a2640401" containerID="38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901" exitCode=0 Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.661970 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.663736 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerDied","Data":"38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901"} Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.663770 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bd8598b-0150-4e47-92b7-5988a2640401","Type":"ContainerDied","Data":"3d0e5462eb3ee18d3a07de39f902272e356491d67370345e0ffe4bc72b8a42a0"} Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.663787 4926 scope.go:117] "RemoveContainer" containerID="624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.673091 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"29d2e355-bed6-4dab-98c3-e2dc1134d327","Type":"ContainerStarted","Data":"083da90032b65c1959166cba724a659832dbd1753209cf1c72f0d88661703f6a"} Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.699434 4926 scope.go:117] "RemoveContainer" containerID="3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.716026 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.716010301 podStartE2EDuration="2.716010301s" podCreationTimestamp="2025-11-25 18:33:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:00.699712818 +0000 UTC m=+1271.085226423" watchObservedRunningTime="2025-11-25 18:34:00.716010301 +0000 UTC m=+1271.101523896" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.721054 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.734108 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd8598b-0150-4e47-92b7-5988a2640401-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.735167 4926 scope.go:117] "RemoveContainer" containerID="5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.754447 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.771648 4926 scope.go:117] "RemoveContainer" containerID="38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.774451 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.774836 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="proxy-httpd" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.774853 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="proxy-httpd" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.774864 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-notification-agent" Nov 
25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.774872 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-notification-agent" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.774880 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="sg-core" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.774885 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="sg-core" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.774897 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.774903 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.774928 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-central-agent" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.774935 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-central-agent" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.775090 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-notification-agent" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.775101 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="sg-core" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.775109 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f664100-2926-4e80-a06e-5c09021eb736" containerName="watcher-decision-engine" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.775123 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="proxy-httpd" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.775142 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" containerName="ceilometer-central-agent" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.809186 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.812243 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.812780 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.815305 4926 scope.go:117] "RemoveContainer" containerID="624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.817005 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d\": container with ID starting with 624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d not found: ID does not exist" containerID="624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.817054 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d"} err="failed to get container status \"624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d\": rpc error: code = NotFound desc = could not find container \"624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d\": container with ID starting with 624a9d557ab1c2175b9a12bb286954a246a7385432a668887be492ca592cea6d not found: ID does not exist" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.817078 4926 scope.go:117] "RemoveContainer" containerID="3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.818498 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2\": container with ID starting with 3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2 not found: ID does not exist" containerID="3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.818639 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2"} err="failed to get container status \"3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2\": rpc error: code = NotFound desc = could not find container \"3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2\": container with ID starting with 3a601f6f3b9c9be12c0b9e7da420f9296dea480d5ec2e297eba42004415a1de2 not found: ID does not exist" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.818672 4926 scope.go:117] "RemoveContainer" containerID="5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.819516 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea\": container with ID starting with 5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea not found: ID does not exist" containerID="5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 
18:34:00.819703 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea"} err="failed to get container status \"5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea\": rpc error: code = NotFound desc = could not find container \"5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea\": container with ID starting with 5f827b4e03bbecbdf3c2d9ea01aec553297fd00e6cf54c5daad44782dc9f0bea not found: ID does not exist" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.819874 4926 scope.go:117] "RemoveContainer" containerID="38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901" Nov 25 18:34:00 crc kubenswrapper[4926]: E1125 18:34:00.820784 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901\": container with ID starting with 38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901 not found: ID does not exist" containerID="38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.820819 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901"} err="failed to get container status \"38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901\": rpc error: code = NotFound desc = could not find container \"38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901\": container with ID starting with 38c9ed81c4d61529f3293d81f8578f5991cc247e9a504735a90e565a18081901 not found: ID does not exist" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.825281 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.836370 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-log-httpd\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.836870 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qm4qf\" (UniqueName: \"kubernetes.io/projected/438d3925-3d4b-4477-bd84-f7685a322498-kube-api-access-qm4qf\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.837076 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-config-data\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.838878 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-run-httpd\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.839547 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-scripts\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.839760 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.839923 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942189 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-log-httpd\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942466 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qm4qf\" (UniqueName: \"kubernetes.io/projected/438d3925-3d4b-4477-bd84-f7685a322498-kube-api-access-qm4qf\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942598 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-config-data\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942682 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-run-httpd\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942818 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-scripts\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942905 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-log-httpd\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.942913 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.943102 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.943298 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-run-httpd\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.948777 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.949079 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-scripts\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.949487 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.951098 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-config-data\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:00 crc kubenswrapper[4926]: I1125 18:34:00.961189 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qm4qf\" (UniqueName: \"kubernetes.io/projected/438d3925-3d4b-4477-bd84-f7685a322498-kube-api-access-qm4qf\") pod \"ceilometer-0\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") " pod="openstack/ceilometer-0" Nov 25 18:34:01 crc kubenswrapper[4926]: I1125 18:34:01.127047 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:34:01 crc kubenswrapper[4926]: I1125 18:34:01.604609 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:01 crc kubenswrapper[4926]: I1125 18:34:01.681781 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerStarted","Data":"31950f3f028b9db5a499a6ee0f0ca94286f04bd94cc3401b487853675ec74a96"} Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.087486 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.340882 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bd8598b-0150-4e47-92b7-5988a2640401" path="/var/lib/kubelet/pods/7bd8598b-0150-4e47-92b7-5988a2640401/volumes" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.557778 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-8ngjs"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.559033 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.561689 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.562693 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.571067 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-8ngjs"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.686883 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.687124 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkjsw\" (UniqueName: \"kubernetes.io/projected/eb9087b2-2d7f-4014-85f0-aaac4431a44f-kube-api-access-rkjsw\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.687210 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-config-data\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.687236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-scripts\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.694819 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerStarted","Data":"bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236"} Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.694861 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerStarted","Data":"d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725"} Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.727184 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.729200 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.731459 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.748153 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788568 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788617 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkjsw\" (UniqueName: \"kubernetes.io/projected/eb9087b2-2d7f-4014-85f0-aaac4431a44f-kube-api-access-rkjsw\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788673 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da15fd4e-4614-4c71-a8fc-40457e5fa91a-logs\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788700 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqgvw\" (UniqueName: \"kubernetes.io/projected/da15fd4e-4614-4c71-a8fc-40457e5fa91a-kube-api-access-fqgvw\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788730 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-config-data\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788758 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-scripts\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788780 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-config-data\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.788818 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.796105 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.799826 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-scripts\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.803620 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-config-data\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.816998 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkjsw\" (UniqueName: \"kubernetes.io/projected/eb9087b2-2d7f-4014-85f0-aaac4431a44f-kube-api-access-rkjsw\") pod \"nova-cell0-cell-mapping-8ngjs\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.834444 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.836031 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.840727 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.869429 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892042 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892090 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q286p\" (UniqueName: \"kubernetes.io/projected/0575d46d-91de-400a-bfd9-e1e73a810081-kube-api-access-q286p\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892125 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da15fd4e-4614-4c71-a8fc-40457e5fa91a-logs\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892142 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-config-data\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892165 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqgvw\" (UniqueName: \"kubernetes.io/projected/da15fd4e-4614-4c71-a8fc-40457e5fa91a-kube-api-access-fqgvw\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892210 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-config-data\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.892244 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.896985 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.897261 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da15fd4e-4614-4c71-a8fc-40457e5fa91a-logs\") pod \"nova-api-0\" (UID: 
\"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.897831 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.898852 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.900517 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.912691 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-config-data\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.919246 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.926877 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqgvw\" (UniqueName: \"kubernetes.io/projected/da15fd4e-4614-4c71-a8fc-40457e5fa91a-kube-api-access-fqgvw\") pod \"nova-api-0\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " pod="openstack/nova-api-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.994437 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.994708 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q286p\" (UniqueName: \"kubernetes.io/projected/0575d46d-91de-400a-bfd9-e1e73a810081-kube-api-access-q286p\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.995070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-config-data\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.995231 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-config-data\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.995310 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.995445 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdzmt\" (UniqueName: 
\"kubernetes.io/projected/13b369b6-e829-4875-bacb-d8be936ec7be-kube-api-access-vdzmt\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:02 crc kubenswrapper[4926]: I1125 18:34:02.995521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13b369b6-e829-4875-bacb-d8be936ec7be-logs\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.021482 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.033253 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-config-data\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.045801 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.046844 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.071025 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q286p\" (UniqueName: \"kubernetes.io/projected/0575d46d-91de-400a-bfd9-e1e73a810081-kube-api-access-q286p\") pod \"nova-scheduler-0\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.079450 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c66c57c99-5w8zh"] Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.081177 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.125881 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-config-data\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.133545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.134139 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdzmt\" (UniqueName: \"kubernetes.io/projected/13b369b6-e829-4875-bacb-d8be936ec7be-kube-api-access-vdzmt\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.134258 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13b369b6-e829-4875-bacb-d8be936ec7be-logs\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.134840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13b369b6-e829-4875-bacb-d8be936ec7be-logs\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.135964 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-config-data\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.143261 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.155975 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c66c57c99-5w8zh"] Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.170871 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdzmt\" (UniqueName: \"kubernetes.io/projected/13b369b6-e829-4875-bacb-d8be936ec7be-kube-api-access-vdzmt\") pod \"nova-metadata-0\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " pod="openstack/nova-metadata-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.186334 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.189922 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.197807 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.207936 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.237833 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.237934 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-config\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.238012 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-svc\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.238133 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9flr\" (UniqueName: \"kubernetes.io/projected/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-kube-api-access-b9flr\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.238150 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.238166 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-swift-storage-0\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.264187 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.302318 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352007 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6pwr\" (UniqueName: \"kubernetes.io/projected/ef389464-cca4-4177-a709-f13850dc4689-kube-api-access-v6pwr\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352041 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352084 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352112 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-config\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352157 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-svc\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352245 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9flr\" (UniqueName: \"kubernetes.io/projected/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-kube-api-access-b9flr\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352260 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352297 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-swift-storage-0\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.352402 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.363613 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-svc\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.364290 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.364822 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.365312 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-config\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.381209 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-swift-storage-0\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.415179 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9flr\" (UniqueName: \"kubernetes.io/projected/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-kube-api-access-b9flr\") pod \"dnsmasq-dns-5c66c57c99-5w8zh\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") " pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.458364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.458497 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6pwr\" (UniqueName: \"kubernetes.io/projected/ef389464-cca4-4177-a709-f13850dc4689-kube-api-access-v6pwr\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.458519 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.479985 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.480947 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.494431 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6pwr\" (UniqueName: \"kubernetes.io/projected/ef389464-cca4-4177-a709-f13850dc4689-kube-api-access-v6pwr\") pod \"nova-cell1-novncproxy-0\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.612927 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.643945 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-8ngjs"]
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.667144 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.732863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerStarted","Data":"311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b"}
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.744504 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8ngjs" event={"ID":"eb9087b2-2d7f-4014-85f0-aaac4431a44f","Type":"ContainerStarted","Data":"fd8d2455f88531bf0aa4a572256a767f895709714385f67da05c6dd66c05bdd2"}
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.835611 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 18:34:03 crc kubenswrapper[4926]: I1125 18:34:03.989677 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.118896 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.137756 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f78p6"]
Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.138957 4926 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.156362 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.160586 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.171008 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f78p6"] Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.298053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.298121 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-config-data\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.298158 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjcxz\" (UniqueName: \"kubernetes.io/projected/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-kube-api-access-kjcxz\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.298237 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-scripts\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.302625 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 18:34:04 crc kubenswrapper[4926]: W1125 18:34:04.303272 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef389464_cca4_4177_a709_f13850dc4689.slice/crio-4c6922111e503ea0d2e00073ccdb115a127a64bebe7269ea8e9d51ccf774013f WatchSource:0}: Error finding container 4c6922111e503ea0d2e00073ccdb115a127a64bebe7269ea8e9d51ccf774013f: Status 404 returned error can't find the container with id 4c6922111e503ea0d2e00073ccdb115a127a64bebe7269ea8e9d51ccf774013f Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.399394 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-config-data\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.400314 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjcxz\" (UniqueName: 
\"kubernetes.io/projected/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-kube-api-access-kjcxz\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.400432 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-scripts\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.400511 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.408162 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-config-data\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.422259 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.425717 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-scripts\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.430856 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjcxz\" (UniqueName: \"kubernetes.io/projected/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-kube-api-access-kjcxz\") pod \"nova-cell1-conductor-db-sync-f78p6\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.458484 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c66c57c99-5w8zh"] Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.527698 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.826328 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" event={"ID":"1aa441a9-06d8-4eaa-a0e8-ae79280303b0","Type":"ContainerStarted","Data":"19b5c022c30eccddf48d1cacaad26bb0df2ac2d532e452bc9dbe46cde620591b"} Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.830373 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13b369b6-e829-4875-bacb-d8be936ec7be","Type":"ContainerStarted","Data":"9a1f5459113e74c827e8d11a89195ceeaf05dd4d525c70a5426a750f86059e1a"} Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.831996 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef389464-cca4-4177-a709-f13850dc4689","Type":"ContainerStarted","Data":"4c6922111e503ea0d2e00073ccdb115a127a64bebe7269ea8e9d51ccf774013f"} Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.834221 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0575d46d-91de-400a-bfd9-e1e73a810081","Type":"ContainerStarted","Data":"85dbaabd5d7d834f692a480a69b42db5ab18819567750f99eb2df0fd8db0a576"} Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.838106 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8ngjs" event={"ID":"eb9087b2-2d7f-4014-85f0-aaac4431a44f","Type":"ContainerStarted","Data":"92f8954e0f37e3d6cbb031ef8a8fb3701eb6fb321795f34c5ba132c7afafc824"} Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.844068 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da15fd4e-4614-4c71-a8fc-40457e5fa91a","Type":"ContainerStarted","Data":"6c93bcfdd6c373ccf400cc567f8104414179d72ec86f27ae29f881b220283041"} Nov 25 18:34:04 crc kubenswrapper[4926]: I1125 18:34:04.861242 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-8ngjs" podStartSLOduration=2.86122178 podStartE2EDuration="2.86122178s" podCreationTimestamp="2025-11-25 18:34:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:04.852507215 +0000 UTC m=+1275.238020820" watchObservedRunningTime="2025-11-25 18:34:04.86122178 +0000 UTC m=+1275.246735385" Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.072541 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f78p6"] Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.872776 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f78p6" event={"ID":"b9b169a7-a3f6-4b73-a317-7f9ab22625ad","Type":"ContainerStarted","Data":"a4d39959a369c04fdb1ca840e5d9abeeb2630a9af5b3f0d60b29451eef90549c"} Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.873101 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f78p6" event={"ID":"b9b169a7-a3f6-4b73-a317-7f9ab22625ad","Type":"ContainerStarted","Data":"fb66b8f6a502d03ae83047650cf2a61b0c7c0ef90d5a9f3a9105c3902bd382b9"} Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.880939 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerStarted","Data":"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3"} Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.881065 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.883039 4926 generic.go:334] "Generic (PLEG): container finished" podID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerID="4a2e95fa552a202c5233f34f707f0d179a9e6d9db8c07961aedbf885a279d23f" exitCode=0 Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.883148 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" event={"ID":"1aa441a9-06d8-4eaa-a0e8-ae79280303b0","Type":"ContainerDied","Data":"4a2e95fa552a202c5233f34f707f0d179a9e6d9db8c07961aedbf885a279d23f"} Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.893169 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-f78p6" podStartSLOduration=1.893153852 podStartE2EDuration="1.893153852s" podCreationTimestamp="2025-11-25 18:34:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:05.891385558 +0000 UTC m=+1276.276899173" watchObservedRunningTime="2025-11-25 18:34:05.893153852 +0000 UTC m=+1276.278667457" Nov 25 18:34:05 crc kubenswrapper[4926]: I1125 18:34:05.912734 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.830743852 podStartE2EDuration="5.912719136s" podCreationTimestamp="2025-11-25 18:34:00 +0000 UTC" firstStartedPulling="2025-11-25 18:34:01.611066808 +0000 UTC m=+1271.996580413" lastFinishedPulling="2025-11-25 18:34:04.693042092 +0000 UTC m=+1275.078555697" observedRunningTime="2025-11-25 18:34:05.911884935 +0000 UTC m=+1276.297398540" watchObservedRunningTime="2025-11-25 18:34:05.912719136 +0000 UTC m=+1276.298232741" Nov 25 18:34:06 crc kubenswrapper[4926]: I1125 18:34:06.960210 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:07 crc kubenswrapper[4926]: I1125 18:34:07.018978 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 18:34:08 crc kubenswrapper[4926]: I1125 18:34:08.967483 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0575d46d-91de-400a-bfd9-e1e73a810081","Type":"ContainerStarted","Data":"9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835"} Nov 25 18:34:08 crc kubenswrapper[4926]: I1125 18:34:08.979240 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da15fd4e-4614-4c71-a8fc-40457e5fa91a","Type":"ContainerStarted","Data":"46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643"} Nov 25 18:34:08 crc kubenswrapper[4926]: I1125 18:34:08.979285 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da15fd4e-4614-4c71-a8fc-40457e5fa91a","Type":"ContainerStarted","Data":"20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6"} Nov 25 18:34:08 crc kubenswrapper[4926]: I1125 18:34:08.995049 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" 
event={"ID":"1aa441a9-06d8-4eaa-a0e8-ae79280303b0","Type":"ContainerStarted","Data":"0f035496039235591f5f6165aa313cf746247336ff4bf426cea037b2b61907c9"} Nov 25 18:34:08 crc kubenswrapper[4926]: I1125 18:34:08.999773 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.002492 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.02463335 podStartE2EDuration="7.002472902s" podCreationTimestamp="2025-11-25 18:34:02 +0000 UTC" firstStartedPulling="2025-11-25 18:34:04.007851733 +0000 UTC m=+1274.393365338" lastFinishedPulling="2025-11-25 18:34:07.985691285 +0000 UTC m=+1278.371204890" observedRunningTime="2025-11-25 18:34:08.994346131 +0000 UTC m=+1279.379859736" watchObservedRunningTime="2025-11-25 18:34:09.002472902 +0000 UTC m=+1279.387986507" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.019680 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13b369b6-e829-4875-bacb-d8be936ec7be","Type":"ContainerStarted","Data":"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169"} Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.019735 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13b369b6-e829-4875-bacb-d8be936ec7be","Type":"ContainerStarted","Data":"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23"} Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.019890 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-log" containerID="cri-o://6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23" gracePeriod=30 Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.019998 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-metadata" containerID="cri-o://8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169" gracePeriod=30 Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.024432 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.025924 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.894023212 podStartE2EDuration="7.025909002s" podCreationTimestamp="2025-11-25 18:34:02 +0000 UTC" firstStartedPulling="2025-11-25 18:34:03.851962739 +0000 UTC m=+1274.237476344" lastFinishedPulling="2025-11-25 18:34:07.983848539 +0000 UTC m=+1278.369362134" observedRunningTime="2025-11-25 18:34:09.020481717 +0000 UTC m=+1279.405995322" watchObservedRunningTime="2025-11-25 18:34:09.025909002 +0000 UTC m=+1279.411422607" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.049044 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.244004923 podStartE2EDuration="7.049019783s" podCreationTimestamp="2025-11-25 18:34:02 +0000 UTC" firstStartedPulling="2025-11-25 18:34:04.174849031 +0000 UTC m=+1274.560362636" lastFinishedPulling="2025-11-25 18:34:07.979863891 +0000 UTC m=+1278.365377496" observedRunningTime="2025-11-25 18:34:09.041788914 +0000 UTC 
m=+1279.427302519" watchObservedRunningTime="2025-11-25 18:34:09.049019783 +0000 UTC m=+1279.434533388" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.060083 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef389464-cca4-4177-a709-f13850dc4689","Type":"ContainerStarted","Data":"5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b"} Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.060260 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ef389464-cca4-4177-a709-f13850dc4689" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b" gracePeriod=30 Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.071894 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" podStartSLOduration=7.071878918 podStartE2EDuration="7.071878918s" podCreationTimestamp="2025-11-25 18:34:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:09.070528855 +0000 UTC m=+1279.456042460" watchObservedRunningTime="2025-11-25 18:34:09.071878918 +0000 UTC m=+1279.457392523" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.081142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.098478 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.428351441 podStartE2EDuration="7.098456575s" podCreationTimestamp="2025-11-25 18:34:02 +0000 UTC" firstStartedPulling="2025-11-25 18:34:04.318320599 +0000 UTC m=+1274.703834204" lastFinishedPulling="2025-11-25 18:34:07.988425733 +0000 UTC m=+1278.373939338" observedRunningTime="2025-11-25 18:34:09.08814949 +0000 UTC m=+1279.473663095" watchObservedRunningTime="2025-11-25 18:34:09.098456575 +0000 UTC m=+1279.483970180" Nov 25 18:34:09 crc kubenswrapper[4926]: I1125 18:34:09.977087 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.029400 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-config-data\") pod \"13b369b6-e829-4875-bacb-d8be936ec7be\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.029665 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13b369b6-e829-4875-bacb-d8be936ec7be-logs\") pod \"13b369b6-e829-4875-bacb-d8be936ec7be\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.029814 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdzmt\" (UniqueName: \"kubernetes.io/projected/13b369b6-e829-4875-bacb-d8be936ec7be-kube-api-access-vdzmt\") pod \"13b369b6-e829-4875-bacb-d8be936ec7be\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.029878 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-combined-ca-bundle\") pod \"13b369b6-e829-4875-bacb-d8be936ec7be\" (UID: \"13b369b6-e829-4875-bacb-d8be936ec7be\") " Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.030135 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13b369b6-e829-4875-bacb-d8be936ec7be-logs" (OuterVolumeSpecName: "logs") pod "13b369b6-e829-4875-bacb-d8be936ec7be" (UID: "13b369b6-e829-4875-bacb-d8be936ec7be"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.030698 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13b369b6-e829-4875-bacb-d8be936ec7be-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.039533 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13b369b6-e829-4875-bacb-d8be936ec7be-kube-api-access-vdzmt" (OuterVolumeSpecName: "kube-api-access-vdzmt") pod "13b369b6-e829-4875-bacb-d8be936ec7be" (UID: "13b369b6-e829-4875-bacb-d8be936ec7be"). InnerVolumeSpecName "kube-api-access-vdzmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.088560 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-config-data" (OuterVolumeSpecName: "config-data") pod "13b369b6-e829-4875-bacb-d8be936ec7be" (UID: "13b369b6-e829-4875-bacb-d8be936ec7be"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.088955 4926 generic.go:334] "Generic (PLEG): container finished" podID="13b369b6-e829-4875-bacb-d8be936ec7be" containerID="8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169" exitCode=0 Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.089052 4926 generic.go:334] "Generic (PLEG): container finished" podID="13b369b6-e829-4875-bacb-d8be936ec7be" containerID="6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23" exitCode=143 Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.089122 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13b369b6-e829-4875-bacb-d8be936ec7be","Type":"ContainerDied","Data":"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169"} Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.089157 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13b369b6-e829-4875-bacb-d8be936ec7be","Type":"ContainerDied","Data":"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23"} Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.089168 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13b369b6-e829-4875-bacb-d8be936ec7be","Type":"ContainerDied","Data":"9a1f5459113e74c827e8d11a89195ceeaf05dd4d525c70a5426a750f86059e1a"} Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.089185 4926 scope.go:117] "RemoveContainer" containerID="8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.089104 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.091978 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.132853 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdzmt\" (UniqueName: \"kubernetes.io/projected/13b369b6-e829-4875-bacb-d8be936ec7be-kube-api-access-vdzmt\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.132880 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.174338 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.180632 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13b369b6-e829-4875-bacb-d8be936ec7be" (UID: "13b369b6-e829-4875-bacb-d8be936ec7be"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.217944 4926 scope.go:117] "RemoveContainer" containerID="6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.235198 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b369b6-e829-4875-bacb-d8be936ec7be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.300835 4926 scope.go:117] "RemoveContainer" containerID="8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169" Nov 25 18:34:10 crc kubenswrapper[4926]: E1125 18:34:10.301402 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169\": container with ID starting with 8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169 not found: ID does not exist" containerID="8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.301447 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169"} err="failed to get container status \"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169\": rpc error: code = NotFound desc = could not find container \"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169\": container with ID starting with 8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169 not found: ID does not exist" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.301472 4926 scope.go:117] "RemoveContainer" containerID="6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23" Nov 25 18:34:10 crc kubenswrapper[4926]: E1125 18:34:10.301905 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23\": container with ID starting with 6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23 not found: ID does not exist" containerID="6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.301934 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23"} err="failed to get container status \"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23\": rpc error: code = NotFound desc = could not find container \"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23\": container with ID starting with 6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23 not found: ID does not exist" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.301950 4926 scope.go:117] "RemoveContainer" containerID="8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.302163 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169"} err="failed to get container status \"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169\": rpc error: code = NotFound desc = could not find container 
\"8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169\": container with ID starting with 8c22649dbe8d8c4349ad8bf1846c22f2a3bcabc13db78d406464395d05000169 not found: ID does not exist" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.302180 4926 scope.go:117] "RemoveContainer" containerID="6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.302446 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23"} err="failed to get container status \"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23\": rpc error: code = NotFound desc = could not find container \"6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23\": container with ID starting with 6e638bc8663200e770795324ba12e157c7e921f6b61cb67a8770d326696d6a23 not found: ID does not exist" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.430388 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.442328 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.462092 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:10 crc kubenswrapper[4926]: E1125 18:34:10.462696 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-metadata" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.462723 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-metadata" Nov 25 18:34:10 crc kubenswrapper[4926]: E1125 18:34:10.462740 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-log" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.462748 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-log" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.463017 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-metadata" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.463056 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" containerName="nova-metadata-log" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.464505 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.470283 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.470600 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.479902 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.541756 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423572b7-047e-4745-8583-757e1109a770-logs\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.541820 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxvpg\" (UniqueName: \"kubernetes.io/projected/423572b7-047e-4745-8583-757e1109a770-kube-api-access-lxvpg\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.541866 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.542263 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-config-data\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.542481 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.644792 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.644993 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-config-data\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.645075 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " 
pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.645111 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423572b7-047e-4745-8583-757e1109a770-logs\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.645147 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxvpg\" (UniqueName: \"kubernetes.io/projected/423572b7-047e-4745-8583-757e1109a770-kube-api-access-lxvpg\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.645502 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423572b7-047e-4745-8583-757e1109a770-logs\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.654507 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.669846 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-config-data\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.688676 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.691723 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxvpg\" (UniqueName: \"kubernetes.io/projected/423572b7-047e-4745-8583-757e1109a770-kube-api-access-lxvpg\") pod \"nova-metadata-0\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " pod="openstack/nova-metadata-0" Nov 25 18:34:10 crc kubenswrapper[4926]: I1125 18:34:10.790098 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 18:34:11 crc kubenswrapper[4926]: I1125 18:34:11.332850 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:34:12 crc kubenswrapper[4926]: I1125 18:34:12.120182 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"423572b7-047e-4745-8583-757e1109a770","Type":"ContainerStarted","Data":"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477"}
Nov 25 18:34:12 crc kubenswrapper[4926]: I1125 18:34:12.121834 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"423572b7-047e-4745-8583-757e1109a770","Type":"ContainerStarted","Data":"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d"}
Nov 25 18:34:12 crc kubenswrapper[4926]: I1125 18:34:12.121854 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"423572b7-047e-4745-8583-757e1109a770","Type":"ContainerStarted","Data":"e5c83dce7499fe92a8610cabbadbeec028f62ed0c2bbb9418fa5a7f783df8981"}
Nov 25 18:34:12 crc kubenswrapper[4926]: I1125 18:34:12.144960 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.144941031 podStartE2EDuration="2.144941031s" podCreationTimestamp="2025-11-25 18:34:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:12.138057062 +0000 UTC m=+1282.523570667" watchObservedRunningTime="2025-11-25 18:34:12.144941031 +0000 UTC m=+1282.530454636"
Nov 25 18:34:12 crc kubenswrapper[4926]: I1125 18:34:12.343206 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13b369b6-e829-4875-bacb-d8be936ec7be" path="/var/lib/kubelet/pods/13b369b6-e829-4875-bacb-d8be936ec7be/volumes"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.047010 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.047107 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.265418 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.265470 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.305261 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.614490 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.668002 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:13 crc kubenswrapper[4926]: I1125 18:34:13.706528 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8cdb477f-d95t9"]
Nov 25 18:34:14 crc kubenswrapper[4926]: I1125 18:34:14.132621 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.208:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:34:14 crc kubenswrapper[4926]: I1125 18:34:14.133053 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.208:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:34:14 crc kubenswrapper[4926]: I1125 18:34:14.164162 4926 generic.go:334] "Generic (PLEG): container finished" podID="eb9087b2-2d7f-4014-85f0-aaac4431a44f" containerID="92f8954e0f37e3d6cbb031ef8a8fb3701eb6fb321795f34c5ba132c7afafc824" exitCode=0
Nov 25 18:34:14 crc kubenswrapper[4926]: I1125 18:34:14.165599 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8ngjs" event={"ID":"eb9087b2-2d7f-4014-85f0-aaac4431a44f","Type":"ContainerDied","Data":"92f8954e0f37e3d6cbb031ef8a8fb3701eb6fb321795f34c5ba132c7afafc824"}
Nov 25 18:34:14 crc kubenswrapper[4926]: I1125 18:34:14.165766 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerName="dnsmasq-dns" containerID="cri-o://e09585bb6fb7be6abc4d3de84cfdf5a04056492375e8e4257b0e9896a134e7b8" gracePeriod=10
Nov 25 18:34:14 crc kubenswrapper[4926]: I1125 18:34:14.399245 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.198902 4926 generic.go:334] "Generic (PLEG): container finished" podID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerID="e09585bb6fb7be6abc4d3de84cfdf5a04056492375e8e4257b0e9896a134e7b8" exitCode=0
Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.199139 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" event={"ID":"c59068f7-7f7a-4de3-b312-9f40bfd46128","Type":"ContainerDied","Data":"e09585bb6fb7be6abc4d3de84cfdf5a04056492375e8e4257b0e9896a134e7b8"}
Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.374892 4926 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.465271 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-config\") pod \"c59068f7-7f7a-4de3-b312-9f40bfd46128\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.465462 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-nb\") pod \"c59068f7-7f7a-4de3-b312-9f40bfd46128\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.465599 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-swift-storage-0\") pod \"c59068f7-7f7a-4de3-b312-9f40bfd46128\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.465652 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwpk2\" (UniqueName: \"kubernetes.io/projected/c59068f7-7f7a-4de3-b312-9f40bfd46128-kube-api-access-lwpk2\") pod \"c59068f7-7f7a-4de3-b312-9f40bfd46128\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.465680 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-svc\") pod \"c59068f7-7f7a-4de3-b312-9f40bfd46128\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.465727 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-sb\") pod \"c59068f7-7f7a-4de3-b312-9f40bfd46128\" (UID: \"c59068f7-7f7a-4de3-b312-9f40bfd46128\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.502507 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c59068f7-7f7a-4de3-b312-9f40bfd46128-kube-api-access-lwpk2" (OuterVolumeSpecName: "kube-api-access-lwpk2") pod "c59068f7-7f7a-4de3-b312-9f40bfd46128" (UID: "c59068f7-7f7a-4de3-b312-9f40bfd46128"). InnerVolumeSpecName "kube-api-access-lwpk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.527632 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-config" (OuterVolumeSpecName: "config") pod "c59068f7-7f7a-4de3-b312-9f40bfd46128" (UID: "c59068f7-7f7a-4de3-b312-9f40bfd46128"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.537664 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c59068f7-7f7a-4de3-b312-9f40bfd46128" (UID: "c59068f7-7f7a-4de3-b312-9f40bfd46128"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.555070 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c59068f7-7f7a-4de3-b312-9f40bfd46128" (UID: "c59068f7-7f7a-4de3-b312-9f40bfd46128"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.556708 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c59068f7-7f7a-4de3-b312-9f40bfd46128" (UID: "c59068f7-7f7a-4de3-b312-9f40bfd46128"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.568354 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.568521 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.568592 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwpk2\" (UniqueName: \"kubernetes.io/projected/c59068f7-7f7a-4de3-b312-9f40bfd46128-kube-api-access-lwpk2\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.568649 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.568703 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.577313 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c59068f7-7f7a-4de3-b312-9f40bfd46128" (UID: "c59068f7-7f7a-4de3-b312-9f40bfd46128"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.640186 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.669814 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkjsw\" (UniqueName: \"kubernetes.io/projected/eb9087b2-2d7f-4014-85f0-aaac4431a44f-kube-api-access-rkjsw\") pod \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.669913 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-combined-ca-bundle\") pod \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.669937 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-config-data\") pod \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.669968 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-scripts\") pod \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\" (UID: \"eb9087b2-2d7f-4014-85f0-aaac4431a44f\") " Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.671157 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c59068f7-7f7a-4de3-b312-9f40bfd46128-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.677942 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-scripts" (OuterVolumeSpecName: "scripts") pod "eb9087b2-2d7f-4014-85f0-aaac4431a44f" (UID: "eb9087b2-2d7f-4014-85f0-aaac4431a44f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.679566 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb9087b2-2d7f-4014-85f0-aaac4431a44f-kube-api-access-rkjsw" (OuterVolumeSpecName: "kube-api-access-rkjsw") pod "eb9087b2-2d7f-4014-85f0-aaac4431a44f" (UID: "eb9087b2-2d7f-4014-85f0-aaac4431a44f"). InnerVolumeSpecName "kube-api-access-rkjsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.730073 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb9087b2-2d7f-4014-85f0-aaac4431a44f" (UID: "eb9087b2-2d7f-4014-85f0-aaac4431a44f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.761725 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-config-data" (OuterVolumeSpecName: "config-data") pod "eb9087b2-2d7f-4014-85f0-aaac4431a44f" (UID: "eb9087b2-2d7f-4014-85f0-aaac4431a44f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.774027 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkjsw\" (UniqueName: \"kubernetes.io/projected/eb9087b2-2d7f-4014-85f0-aaac4431a44f-kube-api-access-rkjsw\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.774257 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.774321 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.774404 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb9087b2-2d7f-4014-85f0-aaac4431a44f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.790435 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 18:34:15 crc kubenswrapper[4926]: I1125 18:34:15.790696 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.213036 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" event={"ID":"c59068f7-7f7a-4de3-b312-9f40bfd46128","Type":"ContainerDied","Data":"32e08a98c700a1afc37077a24ed302528eea0addd0f7483ebd8db70d9307c7b0"} Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.213120 4926 scope.go:117] "RemoveContainer" containerID="e09585bb6fb7be6abc4d3de84cfdf5a04056492375e8e4257b0e9896a134e7b8" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.214762 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8cdb477f-d95t9" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.215864 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8ngjs" event={"ID":"eb9087b2-2d7f-4014-85f0-aaac4431a44f","Type":"ContainerDied","Data":"fd8d2455f88531bf0aa4a572256a767f895709714385f67da05c6dd66c05bdd2"} Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.215899 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd8d2455f88531bf0aa4a572256a767f895709714385f67da05c6dd66c05bdd2" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.216011 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8ngjs" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.266487 4926 scope.go:117] "RemoveContainer" containerID="2eefbb6fff89446d845401b685dabf9ac0ed331636e88358f5125f9408e38ff9" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.287643 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8cdb477f-d95t9"] Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.297073 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8cdb477f-d95t9"] Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.357662 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" path="/var/lib/kubelet/pods/c59068f7-7f7a-4de3-b312-9f40bfd46128/volumes" Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.399834 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.400218 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-log" containerID="cri-o://20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6" gracePeriod=30 Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.400336 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-api" containerID="cri-o://46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643" gracePeriod=30 Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.415709 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.416090 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0575d46d-91de-400a-bfd9-e1e73a810081" containerName="nova-scheduler-scheduler" containerID="cri-o://9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835" gracePeriod=30 Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.437454 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.438161 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-log" containerID="cri-o://73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d" gracePeriod=30 Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.439013 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-metadata" containerID="cri-o://d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477" gracePeriod=30 Nov 25 18:34:16 crc kubenswrapper[4926]: I1125 18:34:16.986850 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.102288 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-combined-ca-bundle\") pod \"423572b7-047e-4745-8583-757e1109a770\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.102647 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-config-data\") pod \"423572b7-047e-4745-8583-757e1109a770\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.102745 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423572b7-047e-4745-8583-757e1109a770-logs\") pod \"423572b7-047e-4745-8583-757e1109a770\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.102802 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxvpg\" (UniqueName: \"kubernetes.io/projected/423572b7-047e-4745-8583-757e1109a770-kube-api-access-lxvpg\") pod \"423572b7-047e-4745-8583-757e1109a770\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.102918 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-nova-metadata-tls-certs\") pod \"423572b7-047e-4745-8583-757e1109a770\" (UID: \"423572b7-047e-4745-8583-757e1109a770\") " Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.103341 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/423572b7-047e-4745-8583-757e1109a770-logs" (OuterVolumeSpecName: "logs") pod "423572b7-047e-4745-8583-757e1109a770" (UID: "423572b7-047e-4745-8583-757e1109a770"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.109820 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/423572b7-047e-4745-8583-757e1109a770-kube-api-access-lxvpg" (OuterVolumeSpecName: "kube-api-access-lxvpg") pod "423572b7-047e-4745-8583-757e1109a770" (UID: "423572b7-047e-4745-8583-757e1109a770"). InnerVolumeSpecName "kube-api-access-lxvpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.140042 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "423572b7-047e-4745-8583-757e1109a770" (UID: "423572b7-047e-4745-8583-757e1109a770"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.141506 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-config-data" (OuterVolumeSpecName: "config-data") pod "423572b7-047e-4745-8583-757e1109a770" (UID: "423572b7-047e-4745-8583-757e1109a770"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.176490 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "423572b7-047e-4745-8583-757e1109a770" (UID: "423572b7-047e-4745-8583-757e1109a770"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.206121 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423572b7-047e-4745-8583-757e1109a770-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.206419 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxvpg\" (UniqueName: \"kubernetes.io/projected/423572b7-047e-4745-8583-757e1109a770-kube-api-access-lxvpg\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.206616 4926 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.206761 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.206891 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423572b7-047e-4745-8583-757e1109a770-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.231945 4926 generic.go:334] "Generic (PLEG): container finished" podID="b9b169a7-a3f6-4b73-a317-7f9ab22625ad" containerID="a4d39959a369c04fdb1ca840e5d9abeeb2630a9af5b3f0d60b29451eef90549c" exitCode=0 Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.232030 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f78p6" event={"ID":"b9b169a7-a3f6-4b73-a317-7f9ab22625ad","Type":"ContainerDied","Data":"a4d39959a369c04fdb1ca840e5d9abeeb2630a9af5b3f0d60b29451eef90549c"} Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.234620 4926 generic.go:334] "Generic (PLEG): container finished" podID="423572b7-047e-4745-8583-757e1109a770" containerID="d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477" exitCode=0 Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.234643 4926 generic.go:334] "Generic (PLEG): container finished" podID="423572b7-047e-4745-8583-757e1109a770" containerID="73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d" exitCode=143 Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.234696 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"423572b7-047e-4745-8583-757e1109a770","Type":"ContainerDied","Data":"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477"} Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.234713 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"423572b7-047e-4745-8583-757e1109a770","Type":"ContainerDied","Data":"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d"} Nov 25 
18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.234725 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"423572b7-047e-4745-8583-757e1109a770","Type":"ContainerDied","Data":"e5c83dce7499fe92a8610cabbadbeec028f62ed0c2bbb9418fa5a7f783df8981"} Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.234747 4926 scope.go:117] "RemoveContainer" containerID="d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.235300 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.238490 4926 generic.go:334] "Generic (PLEG): container finished" podID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerID="20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6" exitCode=143 Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.238562 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da15fd4e-4614-4c71-a8fc-40457e5fa91a","Type":"ContainerDied","Data":"20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6"} Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.268911 4926 scope.go:117] "RemoveContainer" containerID="73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.298433 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.310669 4926 scope.go:117] "RemoveContainer" containerID="d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477" Nov 25 18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.312053 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477\": container with ID starting with d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477 not found: ID does not exist" containerID="d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.312331 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477"} err="failed to get container status \"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477\": rpc error: code = NotFound desc = could not find container \"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477\": container with ID starting with d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477 not found: ID does not exist" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.312545 4926 scope.go:117] "RemoveContainer" containerID="73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.314642 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.315938 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d\": container with ID starting with 73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d not found: ID does not exist" 
containerID="73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.315992 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d"} err="failed to get container status \"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d\": rpc error: code = NotFound desc = could not find container \"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d\": container with ID starting with 73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d not found: ID does not exist" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.316022 4926 scope.go:117] "RemoveContainer" containerID="d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.316580 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477"} err="failed to get container status \"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477\": rpc error: code = NotFound desc = could not find container \"d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477\": container with ID starting with d00e2db8d086bb5ee512e76fb64b5e845078be20225c51dadadbacdab9ccb477 not found: ID does not exist" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.316609 4926 scope.go:117] "RemoveContainer" containerID="73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.316987 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d"} err="failed to get container status \"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d\": rpc error: code = NotFound desc = could not find container \"73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d\": container with ID starting with 73d5903f420879c71e0b906478b2260b006139f915401a5dd296cb4046bd0a5d not found: ID does not exist" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.344127 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.344934 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb9087b2-2d7f-4014-85f0-aaac4431a44f" containerName="nova-manage" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.344957 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb9087b2-2d7f-4014-85f0-aaac4431a44f" containerName="nova-manage" Nov 25 18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.344994 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-log" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.345003 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-log" Nov 25 18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.345030 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerName="dnsmasq-dns" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.345037 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerName="dnsmasq-dns" Nov 25 
18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.346689 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerName="init" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.346707 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerName="init" Nov 25 18:34:17 crc kubenswrapper[4926]: E1125 18:34:17.346733 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-metadata" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.346740 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-metadata" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.348310 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb9087b2-2d7f-4014-85f0-aaac4431a44f" containerName="nova-manage" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.348392 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c59068f7-7f7a-4de3-b312-9f40bfd46128" containerName="dnsmasq-dns" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.348409 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-log" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.348438 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="423572b7-047e-4745-8583-757e1109a770" containerName="nova-metadata-metadata" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.350455 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.353991 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.354016 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.367530 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.417021 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-config-data\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.417199 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63dc318-bb0d-492a-b59a-1944cc047b83-logs\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.417663 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.417736 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-cn5bc\" (UniqueName: \"kubernetes.io/projected/d63dc318-bb0d-492a-b59a-1944cc047b83-kube-api-access-cn5bc\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.417773 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.519251 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn5bc\" (UniqueName: \"kubernetes.io/projected/d63dc318-bb0d-492a-b59a-1944cc047b83-kube-api-access-cn5bc\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.519327 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.519506 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-config-data\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.519595 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63dc318-bb0d-492a-b59a-1944cc047b83-logs\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.521028 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.520017 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63dc318-bb0d-492a-b59a-1944cc047b83-logs\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.523026 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-config-data\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.523242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.524392 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.536578 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn5bc\" (UniqueName: \"kubernetes.io/projected/d63dc318-bb0d-492a-b59a-1944cc047b83-kube-api-access-cn5bc\") pod \"nova-metadata-0\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") " pod="openstack/nova-metadata-0" Nov 25 18:34:17 crc kubenswrapper[4926]: I1125 18:34:17.681546 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.000768 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.028870 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-combined-ca-bundle\") pod \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.029167 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqgvw\" (UniqueName: \"kubernetes.io/projected/da15fd4e-4614-4c71-a8fc-40457e5fa91a-kube-api-access-fqgvw\") pod \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.029209 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da15fd4e-4614-4c71-a8fc-40457e5fa91a-logs\") pod \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.029250 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-config-data\") pod \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\" (UID: \"da15fd4e-4614-4c71-a8fc-40457e5fa91a\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.030193 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da15fd4e-4614-4c71-a8fc-40457e5fa91a-logs" (OuterVolumeSpecName: "logs") pod "da15fd4e-4614-4c71-a8fc-40457e5fa91a" (UID: "da15fd4e-4614-4c71-a8fc-40457e5fa91a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.034087 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da15fd4e-4614-4c71-a8fc-40457e5fa91a-kube-api-access-fqgvw" (OuterVolumeSpecName: "kube-api-access-fqgvw") pod "da15fd4e-4614-4c71-a8fc-40457e5fa91a" (UID: "da15fd4e-4614-4c71-a8fc-40457e5fa91a"). InnerVolumeSpecName "kube-api-access-fqgvw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.071394 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da15fd4e-4614-4c71-a8fc-40457e5fa91a" (UID: "da15fd4e-4614-4c71-a8fc-40457e5fa91a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.072481 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-config-data" (OuterVolumeSpecName: "config-data") pod "da15fd4e-4614-4c71-a8fc-40457e5fa91a" (UID: "da15fd4e-4614-4c71-a8fc-40457e5fa91a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.131647 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.131681 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqgvw\" (UniqueName: \"kubernetes.io/projected/da15fd4e-4614-4c71-a8fc-40457e5fa91a-kube-api-access-fqgvw\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.131693 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da15fd4e-4614-4c71-a8fc-40457e5fa91a-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.131701 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da15fd4e-4614-4c71-a8fc-40457e5fa91a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.202513 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.250822 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63dc318-bb0d-492a-b59a-1944cc047b83","Type":"ContainerStarted","Data":"de82a896b70c92ea16aa04ed0cddce5c874ab558629314d4074682089a2be9eb"} Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.257765 4926 generic.go:334] "Generic (PLEG): container finished" podID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerID="46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643" exitCode=0 Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.257862 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.257920 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da15fd4e-4614-4c71-a8fc-40457e5fa91a","Type":"ContainerDied","Data":"46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643"} Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.257962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da15fd4e-4614-4c71-a8fc-40457e5fa91a","Type":"ContainerDied","Data":"6c93bcfdd6c373ccf400cc567f8104414179d72ec86f27ae29f881b220283041"} Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.257984 4926 scope.go:117] "RemoveContainer" containerID="46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643" Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.268519 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.272172 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.274133 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.274178 4926 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0575d46d-91de-400a-bfd9-e1e73a810081" containerName="nova-scheduler-scheduler" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.300785 4926 scope.go:117] "RemoveContainer" containerID="20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.302756 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.319886 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.339628 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="423572b7-047e-4745-8583-757e1109a770" path="/var/lib/kubelet/pods/423572b7-047e-4745-8583-757e1109a770/volumes" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.340248 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" path="/var/lib/kubelet/pods/da15fd4e-4614-4c71-a8fc-40457e5fa91a/volumes" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.340827 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.341158 
4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-api" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.341174 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-api" Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.341194 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-log" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.341200 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-log" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.341482 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-log" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.341498 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="da15fd4e-4614-4c71-a8fc-40457e5fa91a" containerName="nova-api-api" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.342621 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.346593 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.350519 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.356905 4926 scope.go:117] "RemoveContainer" containerID="46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643" Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.357232 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643\": container with ID starting with 46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643 not found: ID does not exist" containerID="46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.357264 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643"} err="failed to get container status \"46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643\": rpc error: code = NotFound desc = could not find container \"46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643\": container with ID starting with 46383df8fa2ffc16c384ee3f62e353bb6912280ede9db8a53ab3afdca052a643 not found: ID does not exist" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.357287 4926 scope.go:117] "RemoveContainer" containerID="20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6" Nov 25 18:34:18 crc kubenswrapper[4926]: E1125 18:34:18.357476 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6\": container with ID starting with 20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6 not found: ID does not exist" containerID="20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.357499 4926 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6"} err="failed to get container status \"20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6\": rpc error: code = NotFound desc = could not find container \"20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6\": container with ID starting with 20246bdb73b0c2072d833b4be4bdf13d312b2df2c2f11bf230b4eadfa74102a6 not found: ID does not exist" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.436270 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd77a81c-f483-45c2-98a4-f0211de856a6-logs\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.437045 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5jl9\" (UniqueName: \"kubernetes.io/projected/dd77a81c-f483-45c2-98a4-f0211de856a6-kube-api-access-r5jl9\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.437644 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-config-data\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.437675 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.540215 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5jl9\" (UniqueName: \"kubernetes.io/projected/dd77a81c-f483-45c2-98a4-f0211de856a6-kube-api-access-r5jl9\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.540311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.540337 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-config-data\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.540536 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd77a81c-f483-45c2-98a4-f0211de856a6-logs\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.540903 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/dd77a81c-f483-45c2-98a4-f0211de856a6-logs\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.544107 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.544462 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-config-data\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.555783 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5jl9\" (UniqueName: \"kubernetes.io/projected/dd77a81c-f483-45c2-98a4-f0211de856a6-kube-api-access-r5jl9\") pod \"nova-api-0\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.693046 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.854267 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.950487 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-scripts\") pod \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.950582 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjcxz\" (UniqueName: \"kubernetes.io/projected/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-kube-api-access-kjcxz\") pod \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.951659 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-combined-ca-bundle\") pod \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.951842 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-config-data\") pod \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\" (UID: \"b9b169a7-a3f6-4b73-a317-7f9ab22625ad\") " Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.956529 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-scripts" (OuterVolumeSpecName: "scripts") pod "b9b169a7-a3f6-4b73-a317-7f9ab22625ad" (UID: "b9b169a7-a3f6-4b73-a317-7f9ab22625ad"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:18 crc kubenswrapper[4926]: I1125 18:34:18.971357 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-kube-api-access-kjcxz" (OuterVolumeSpecName: "kube-api-access-kjcxz") pod "b9b169a7-a3f6-4b73-a317-7f9ab22625ad" (UID: "b9b169a7-a3f6-4b73-a317-7f9ab22625ad"). InnerVolumeSpecName "kube-api-access-kjcxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.005394 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9b169a7-a3f6-4b73-a317-7f9ab22625ad" (UID: "b9b169a7-a3f6-4b73-a317-7f9ab22625ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.011562 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-config-data" (OuterVolumeSpecName: "config-data") pod "b9b169a7-a3f6-4b73-a317-7f9ab22625ad" (UID: "b9b169a7-a3f6-4b73-a317-7f9ab22625ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.060251 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.060285 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.060295 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjcxz\" (UniqueName: \"kubernetes.io/projected/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-kube-api-access-kjcxz\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.060307 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b169a7-a3f6-4b73-a317-7f9ab22625ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.273990 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f78p6" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.274007 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f78p6" event={"ID":"b9b169a7-a3f6-4b73-a317-7f9ab22625ad","Type":"ContainerDied","Data":"fb66b8f6a502d03ae83047650cf2a61b0c7c0ef90d5a9f3a9105c3902bd382b9"} Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.274095 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb66b8f6a502d03ae83047650cf2a61b0c7c0ef90d5a9f3a9105c3902bd382b9" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.276997 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63dc318-bb0d-492a-b59a-1944cc047b83","Type":"ContainerStarted","Data":"8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776"} Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.277136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63dc318-bb0d-492a-b59a-1944cc047b83","Type":"ContainerStarted","Data":"5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028"} Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.290775 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:19 crc kubenswrapper[4926]: W1125 18:34:19.295423 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd77a81c_f483_45c2_98a4_f0211de856a6.slice/crio-73e3232aa159ddb79801be338297fc61584a7ceb3139636be150235cad24ec66 WatchSource:0}: Error finding container 73e3232aa159ddb79801be338297fc61584a7ceb3139636be150235cad24ec66: Status 404 returned error can't find the container with id 73e3232aa159ddb79801be338297fc61584a7ceb3139636be150235cad24ec66 Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.338337 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.33831259 podStartE2EDuration="2.33831259s" podCreationTimestamp="2025-11-25 18:34:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:19.305201791 +0000 UTC m=+1289.690715426" watchObservedRunningTime="2025-11-25 18:34:19.33831259 +0000 UTC m=+1289.723826205" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.372517 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 18:34:19 crc kubenswrapper[4926]: E1125 18:34:19.373063 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9b169a7-a3f6-4b73-a317-7f9ab22625ad" containerName="nova-cell1-conductor-db-sync" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.373077 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9b169a7-a3f6-4b73-a317-7f9ab22625ad" containerName="nova-cell1-conductor-db-sync" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.373325 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9b169a7-a3f6-4b73-a317-7f9ab22625ad" containerName="nova-cell1-conductor-db-sync" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.374092 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.376438 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.386266 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.468006 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5574e7bb-0691-4368-acd1-8e50441657e2-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.468310 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5574e7bb-0691-4368-acd1-8e50441657e2-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.468383 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldxvt\" (UniqueName: \"kubernetes.io/projected/5574e7bb-0691-4368-acd1-8e50441657e2-kube-api-access-ldxvt\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.569704 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5574e7bb-0691-4368-acd1-8e50441657e2-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.569963 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5574e7bb-0691-4368-acd1-8e50441657e2-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.570155 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldxvt\" (UniqueName: \"kubernetes.io/projected/5574e7bb-0691-4368-acd1-8e50441657e2-kube-api-access-ldxvt\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.575109 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5574e7bb-0691-4368-acd1-8e50441657e2-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.575687 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5574e7bb-0691-4368-acd1-8e50441657e2-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.587745 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldxvt\" (UniqueName: \"kubernetes.io/projected/5574e7bb-0691-4368-acd1-8e50441657e2-kube-api-access-ldxvt\") pod \"nova-cell1-conductor-0\" (UID: \"5574e7bb-0691-4368-acd1-8e50441657e2\") " pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:19 crc kubenswrapper[4926]: I1125 18:34:19.705534 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.031608 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.291757 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dd77a81c-f483-45c2-98a4-f0211de856a6","Type":"ContainerStarted","Data":"26627a60dc1773bd1470d98f4de0381a3324901df83fa84e8f0a2439772867c6"} Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.292248 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dd77a81c-f483-45c2-98a4-f0211de856a6","Type":"ContainerStarted","Data":"dc9291bc258f7e85a5539f094c4cd0a41cad6ac855cc51a49cbdd4e72c654ff4"} Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.292261 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dd77a81c-f483-45c2-98a4-f0211de856a6","Type":"ContainerStarted","Data":"73e3232aa159ddb79801be338297fc61584a7ceb3139636be150235cad24ec66"} Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.293949 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5574e7bb-0691-4368-acd1-8e50441657e2","Type":"ContainerStarted","Data":"e5b6a88a6d6a5b4f6729c3a20e295857d650f1708061bb5c2bd09a4953648629"} Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.293981 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5574e7bb-0691-4368-acd1-8e50441657e2","Type":"ContainerStarted","Data":"6511ad02763bb706060191d0e8dfcd573c9a2c20c791bb277ea674665b0ffcb4"} Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.340219 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.340200899 podStartE2EDuration="2.340200899s" podCreationTimestamp="2025-11-25 18:34:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:20.307285275 +0000 UTC m=+1290.692798910" watchObservedRunningTime="2025-11-25 18:34:20.340200899 +0000 UTC m=+1290.725714504" Nov 25 18:34:20 crc kubenswrapper[4926]: I1125 18:34:20.344068 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.344056294 podStartE2EDuration="1.344056294s" podCreationTimestamp="2025-11-25 18:34:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:20.328705775 +0000 UTC m=+1290.714219420" watchObservedRunningTime="2025-11-25 18:34:20.344056294 +0000 UTC m=+1290.729569899" Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.308854 4926 generic.go:334] "Generic (PLEG): container finished" podID="0575d46d-91de-400a-bfd9-e1e73a810081" containerID="9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835" exitCode=0 Nov 25 18:34:21 crc 
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.308979 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0575d46d-91de-400a-bfd9-e1e73a810081","Type":"ContainerDied","Data":"85dbaabd5d7d834f692a480a69b42db5ab18819567750f99eb2df0fd8db0a576"}
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.308999 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85dbaabd5d7d834f692a480a69b42db5ab18819567750f99eb2df0fd8db0a576"
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.309416 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.347408 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.406575 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q286p\" (UniqueName: \"kubernetes.io/projected/0575d46d-91de-400a-bfd9-e1e73a810081-kube-api-access-q286p\") pod \"0575d46d-91de-400a-bfd9-e1e73a810081\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") "
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.407557 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-combined-ca-bundle\") pod \"0575d46d-91de-400a-bfd9-e1e73a810081\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") "
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.407813 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-config-data\") pod \"0575d46d-91de-400a-bfd9-e1e73a810081\" (UID: \"0575d46d-91de-400a-bfd9-e1e73a810081\") "
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.413206 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0575d46d-91de-400a-bfd9-e1e73a810081-kube-api-access-q286p" (OuterVolumeSpecName: "kube-api-access-q286p") pod "0575d46d-91de-400a-bfd9-e1e73a810081" (UID: "0575d46d-91de-400a-bfd9-e1e73a810081"). InnerVolumeSpecName "kube-api-access-q286p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.451536 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-config-data" (OuterVolumeSpecName: "config-data") pod "0575d46d-91de-400a-bfd9-e1e73a810081" (UID: "0575d46d-91de-400a-bfd9-e1e73a810081"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.452476 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0575d46d-91de-400a-bfd9-e1e73a810081" (UID: "0575d46d-91de-400a-bfd9-e1e73a810081"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.510678 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.510724 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0575d46d-91de-400a-bfd9-e1e73a810081-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:21 crc kubenswrapper[4926]: I1125 18:34:21.510737 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q286p\" (UniqueName: \"kubernetes.io/projected/0575d46d-91de-400a-bfd9-e1e73a810081-kube-api-access-q286p\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.319461 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.369462 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.393151 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.413504 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:22 crc kubenswrapper[4926]: E1125 18:34:22.414028 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0575d46d-91de-400a-bfd9-e1e73a810081" containerName="nova-scheduler-scheduler" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.414050 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0575d46d-91de-400a-bfd9-e1e73a810081" containerName="nova-scheduler-scheduler" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.414286 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0575d46d-91de-400a-bfd9-e1e73a810081" containerName="nova-scheduler-scheduler" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.415144 4926 util.go:30] "No sandbox for pod can be found. 
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.418432 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.444263 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.542574 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-config-data\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.542854 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgdjq\" (UniqueName: \"kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.543042 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.645771 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-config-data\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.646331 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgdjq\" (UniqueName: \"kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.646478 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.650823 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.658994 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-config-data\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.667709 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgdjq\" (UniqueName: \"kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0"
\"kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq\") pod \"nova-scheduler-0\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " pod="openstack/nova-scheduler-0" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.681712 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.682985 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 18:34:22 crc kubenswrapper[4926]: I1125 18:34:22.742649 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:34:23 crc kubenswrapper[4926]: I1125 18:34:23.237657 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:34:23 crc kubenswrapper[4926]: I1125 18:34:23.332533 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83dbed29-b873-4c13-8dd2-82577a28bb81","Type":"ContainerStarted","Data":"c9d5e9e2ac13ae9d6c9b4cc467c7f78347901ef8bfdb4921cd1bc30d65f96fe1"} Nov 25 18:34:24 crc kubenswrapper[4926]: I1125 18:34:24.343796 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0575d46d-91de-400a-bfd9-e1e73a810081" path="/var/lib/kubelet/pods/0575d46d-91de-400a-bfd9-e1e73a810081/volumes" Nov 25 18:34:24 crc kubenswrapper[4926]: I1125 18:34:24.348760 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83dbed29-b873-4c13-8dd2-82577a28bb81","Type":"ContainerStarted","Data":"1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e"} Nov 25 18:34:24 crc kubenswrapper[4926]: I1125 18:34:24.372045 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.372015325 podStartE2EDuration="2.372015325s" podCreationTimestamp="2025-11-25 18:34:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:24.369257767 +0000 UTC m=+1294.754771442" watchObservedRunningTime="2025-11-25 18:34:24.372015325 +0000 UTC m=+1294.757528960" Nov 25 18:34:27 crc kubenswrapper[4926]: I1125 18:34:27.682289 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 18:34:27 crc kubenswrapper[4926]: I1125 18:34:27.683171 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 18:34:27 crc kubenswrapper[4926]: I1125 18:34:27.743351 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 18:34:28 crc kubenswrapper[4926]: I1125 18:34:28.694061 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 18:34:28 crc kubenswrapper[4926]: I1125 18:34:28.694490 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 18:34:28 crc kubenswrapper[4926]: I1125 18:34:28.701654 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.215:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 18:34:28 crc kubenswrapper[4926]: I1125 18:34:28.701707 4926 
Nov 25 18:34:29 crc kubenswrapper[4926]: I1125 18:34:29.740300 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 25 18:34:29 crc kubenswrapper[4926]: I1125 18:34:29.776591 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:34:29 crc kubenswrapper[4926]: I1125 18:34:29.776598 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:34:31 crc kubenswrapper[4926]: I1125 18:34:31.227066 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 25 18:34:32 crc kubenswrapper[4926]: I1125 18:34:32.744423 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 18:34:32 crc kubenswrapper[4926]: I1125 18:34:32.779116 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 18:34:33 crc kubenswrapper[4926]: I1125 18:34:33.517293 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 18:34:33 crc kubenswrapper[4926]: I1125 18:34:33.541508 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:34:33 crc kubenswrapper[4926]: I1125 18:34:33.541573 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:34:34 crc kubenswrapper[4926]: I1125 18:34:34.925765 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 18:34:34 crc kubenswrapper[4926]: I1125 18:34:34.926397 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" containerName="kube-state-metrics" containerID="cri-o://7b5790e546b285a77b792d10763aefeb92245339fdec2cc6858f05ffbfc9ecca" gracePeriod=30
Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.483350 4926 generic.go:334] "Generic (PLEG): container finished" podID="c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" containerID="7b5790e546b285a77b792d10763aefeb92245339fdec2cc6858f05ffbfc9ecca" exitCode=2
Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.483523 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8","Type":"ContainerDied","Data":"7b5790e546b285a77b792d10763aefeb92245339fdec2cc6858f05ffbfc9ecca"}
event={"ID":"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8","Type":"ContainerDied","Data":"7b5790e546b285a77b792d10763aefeb92245339fdec2cc6858f05ffbfc9ecca"} Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.483657 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8","Type":"ContainerDied","Data":"99bcbec0308442ba333870b02c84ff0ec32f2801a06df92278567b65a0fc8859"} Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.483674 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99bcbec0308442ba333870b02c84ff0ec32f2801a06df92278567b65a0fc8859" Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.494943 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.567120 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96jtv\" (UniqueName: \"kubernetes.io/projected/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8-kube-api-access-96jtv\") pod \"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8\" (UID: \"c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8\") " Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.575570 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8-kube-api-access-96jtv" (OuterVolumeSpecName: "kube-api-access-96jtv") pod "c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" (UID: "c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8"). InnerVolumeSpecName "kube-api-access-96jtv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:35 crc kubenswrapper[4926]: I1125 18:34:35.669736 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96jtv\" (UniqueName: \"kubernetes.io/projected/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8-kube-api-access-96jtv\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.499934 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.535120 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.548030 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.562300 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 18:34:36 crc kubenswrapper[4926]: E1125 18:34:36.562834 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" containerName="kube-state-metrics" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.562855 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" containerName="kube-state-metrics" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.563037 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" containerName="kube-state-metrics" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.563739 4926 util.go:30] "No sandbox for pod can be found. 
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.572036 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.573128 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.588424 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.689826 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.690012 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.690099 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqxfk\" (UniqueName: \"kubernetes.io/projected/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-api-access-tqxfk\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.690142 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.791690 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.791766 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqxfk\" (UniqueName: \"kubernetes.io/projected/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-api-access-tqxfk\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.791817 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.792214 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0"
(UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.798500 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.798931 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.802088 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.810845 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqxfk\" (UniqueName: \"kubernetes.io/projected/e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1-kube-api-access-tqxfk\") pod \"kube-state-metrics-0\" (UID: \"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1\") " pod="openstack/kube-state-metrics-0" Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.882922 4926 util.go:30] "No sandbox for pod can be found. 
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.985828 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.986285 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-central-agent" containerID="cri-o://d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725" gracePeriod=30
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.986363 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="proxy-httpd" containerID="cri-o://579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3" gracePeriod=30
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.986460 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="sg-core" containerID="cri-o://311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b" gracePeriod=30
Nov 25 18:34:36 crc kubenswrapper[4926]: I1125 18:34:36.986472 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-notification-agent" containerID="cri-o://bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236" gracePeriod=30
Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.470167 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 18:34:37 crc kubenswrapper[4926]: W1125 18:34:37.477315 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3d61c67_f446_4b4b_a9fb_9e62d24c7cb1.slice/crio-97594da520db4f73e533f0a08fa663d0447a93c3d2e3141ba733b163168bd4a3 WatchSource:0}: Error finding container 97594da520db4f73e533f0a08fa663d0447a93c3d2e3141ba733b163168bd4a3: Status 404 returned error can't find the container with id 97594da520db4f73e533f0a08fa663d0447a93c3d2e3141ba733b163168bd4a3
Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.511744 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1","Type":"ContainerStarted","Data":"97594da520db4f73e533f0a08fa663d0447a93c3d2e3141ba733b163168bd4a3"}
Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.515138 4926 generic.go:334] "Generic (PLEG): container finished" podID="438d3925-3d4b-4477-bd84-f7685a322498" containerID="579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3" exitCode=0
Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.515177 4926 generic.go:334] "Generic (PLEG): container finished" podID="438d3925-3d4b-4477-bd84-f7685a322498" containerID="311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b" exitCode=2
Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.515188 4926 generic.go:334] "Generic (PLEG): container finished" podID="438d3925-3d4b-4477-bd84-f7685a322498" containerID="d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725" exitCode=0
Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.515211 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerDied","Data":"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3"}
event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerDied","Data":"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3"} Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.515240 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerDied","Data":"311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b"} Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.515253 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerDied","Data":"d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725"} Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.690659 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.700652 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 18:34:37 crc kubenswrapper[4926]: I1125 18:34:37.700880 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.345496 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8" path="/var/lib/kubelet/pods/c6bdccbf-9590-4fa7-8d79-8f7a99e8dcc8/volumes" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.528599 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1","Type":"ContainerStarted","Data":"9b0fa6f26c858dc2369c662a34c73ee3a8d167eaa7efee6bd9a93dbf2aec4638"} Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.537087 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.549521 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.162537502 podStartE2EDuration="2.549503579s" podCreationTimestamp="2025-11-25 18:34:36 +0000 UTC" firstStartedPulling="2025-11-25 18:34:37.481051444 +0000 UTC m=+1307.866565049" lastFinishedPulling="2025-11-25 18:34:37.868017511 +0000 UTC m=+1308.253531126" observedRunningTime="2025-11-25 18:34:38.543916501 +0000 UTC m=+1308.929430136" watchObservedRunningTime="2025-11-25 18:34:38.549503579 +0000 UTC m=+1308.935017194" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.753819 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.755164 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.761788 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 18:34:38 crc kubenswrapper[4926]: I1125 18:34:38.853533 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.536078 4926 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.537516 4926 generic.go:334] "Generic (PLEG): container finished" podID="ef389464-cca4-4177-a709-f13850dc4689" containerID="5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b" exitCode=137
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.538657 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef389464-cca4-4177-a709-f13850dc4689","Type":"ContainerDied","Data":"5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b"}
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.538713 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.538737 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ef389464-cca4-4177-a709-f13850dc4689","Type":"ContainerDied","Data":"4c6922111e503ea0d2e00073ccdb115a127a64bebe7269ea8e9d51ccf774013f"}
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.540296 4926 scope.go:117] "RemoveContainer" containerID="5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.540430 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.546858 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.567761 4926 scope.go:117] "RemoveContainer" containerID="5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b"
Nov 25 18:34:39 crc kubenswrapper[4926]: E1125 18:34:39.568274 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b\": container with ID starting with 5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b not found: ID does not exist" containerID="5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.568342 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b"} err="failed to get container status \"5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b\": rpc error: code = NotFound desc = could not find container \"5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b\": container with ID starting with 5e4a696ea277da5801dd6370d757e06c7d04a767430c4b319a7188412507581b not found: ID does not exist"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.667079 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-config-data\") pod \"ef389464-cca4-4177-a709-f13850dc4689\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") "
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.667150 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-combined-ca-bundle\") pod \"ef389464-cca4-4177-a709-f13850dc4689\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") "
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.667204 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6pwr\" (UniqueName: \"kubernetes.io/projected/ef389464-cca4-4177-a709-f13850dc4689-kube-api-access-v6pwr\") pod \"ef389464-cca4-4177-a709-f13850dc4689\" (UID: \"ef389464-cca4-4177-a709-f13850dc4689\") "
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.697640 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef389464-cca4-4177-a709-f13850dc4689-kube-api-access-v6pwr" (OuterVolumeSpecName: "kube-api-access-v6pwr") pod "ef389464-cca4-4177-a709-f13850dc4689" (UID: "ef389464-cca4-4177-a709-f13850dc4689"). InnerVolumeSpecName "kube-api-access-v6pwr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.747122 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b7d54555f-9d6bc"]
Nov 25 18:34:39 crc kubenswrapper[4926]: E1125 18:34:39.747652 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef389464-cca4-4177-a709-f13850dc4689" containerName="nova-cell1-novncproxy-novncproxy"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.747669 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef389464-cca4-4177-a709-f13850dc4689" containerName="nova-cell1-novncproxy-novncproxy"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.748948 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-config-data" (OuterVolumeSpecName: "config-data") pod "ef389464-cca4-4177-a709-f13850dc4689" (UID: "ef389464-cca4-4177-a709-f13850dc4689"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.750821 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef389464-cca4-4177-a709-f13850dc4689" containerName="nova-cell1-novncproxy-novncproxy"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.752981 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.774431 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b7d54555f-9d6bc"]
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.774462 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.774792 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6pwr\" (UniqueName: \"kubernetes.io/projected/ef389464-cca4-4177-a709-f13850dc4689-kube-api-access-v6pwr\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.791670 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ef389464-cca4-4177-a709-f13850dc4689" (UID: "ef389464-cca4-4177-a709-f13850dc4689"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.876802 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glxvm\" (UniqueName: \"kubernetes.io/projected/d25d769e-8387-460f-806f-065854877f2c-kube-api-access-glxvm\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.876854 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.876894 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-swift-storage-0\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.876947 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-config\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.876979 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.877000 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-svc\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.877159 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef389464-cca4-4177-a709-f13850dc4689-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.979110 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-config\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.979203 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:39 crc 
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.979504 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glxvm\" (UniqueName: \"kubernetes.io/projected/d25d769e-8387-460f-806f-065854877f2c-kube-api-access-glxvm\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.979587 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.979668 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-swift-storage-0\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.980215 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-config\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.980252 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.980578 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-swift-storage-0\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.980644 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.981058 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-svc\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:39 crc kubenswrapper[4926]: I1125 18:34:39.996518 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glxvm\" (UniqueName: \"kubernetes.io/projected/d25d769e-8387-460f-806f-065854877f2c-kube-api-access-glxvm\") pod \"dnsmasq-dns-7b7d54555f-9d6bc\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.119935 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.533424 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.566871 4926 generic.go:334] "Generic (PLEG): container finished" podID="438d3925-3d4b-4477-bd84-f7685a322498" containerID="bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236" exitCode=0
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.566977 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerDied","Data":"bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236"}
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.567018 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"438d3925-3d4b-4477-bd84-f7685a322498","Type":"ContainerDied","Data":"31950f3f028b9db5a499a6ee0f0ca94286f04bd94cc3401b487853675ec74a96"}
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.567055 4926 scope.go:117] "RemoveContainer" containerID="579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.567295 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.571035 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.619993 4926 scope.go:117] "RemoveContainer" containerID="311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.634463 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.671569 4926 scope.go:117] "RemoveContainer" containerID="bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.682587 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.695445 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.695919 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-notification-agent"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.695937 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-notification-agent"
Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.695967 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="sg-core"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.695974 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="sg-core"
Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.695982 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="proxy-httpd"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.695989 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="proxy-httpd"
Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.696020 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-central-agent"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.696029 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-central-agent"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.696203 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-central-agent"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.696213 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="sg-core"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.696228 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="ceilometer-notification-agent"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.696247 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="438d3925-3d4b-4477-bd84-f7685a322498" containerName="proxy-httpd"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.697035 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.700890 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.701068 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.701182 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.704989 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705609 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-log-httpd\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705717 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-sg-core-conf-yaml\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705757 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qm4qf\" (UniqueName: \"kubernetes.io/projected/438d3925-3d4b-4477-bd84-f7685a322498-kube-api-access-qm4qf\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705782 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-combined-ca-bundle\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705869 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-scripts\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705903 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-config-data\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.705937 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-run-httpd\") pod \"438d3925-3d4b-4477-bd84-f7685a322498\" (UID: \"438d3925-3d4b-4477-bd84-f7685a322498\") "
Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.707222 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
"438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.710028 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.714601 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-scripts" (OuterVolumeSpecName: "scripts") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.715362 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/438d3925-3d4b-4477-bd84-f7685a322498-kube-api-access-qm4qf" (OuterVolumeSpecName: "kube-api-access-qm4qf") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "kube-api-access-qm4qf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.720252 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b7d54555f-9d6bc"] Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.749147 4926 scope.go:117] "RemoveContainer" containerID="d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.750962 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.798015 4926 scope.go:117] "RemoveContainer" containerID="579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3" Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.799513 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3\": container with ID starting with 579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3 not found: ID does not exist" containerID="579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.799562 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3"} err="failed to get container status \"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3\": rpc error: code = NotFound desc = could not find container \"579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3\": container with ID starting with 579a40d6fd33806c3c063ea0d5a2caf603459b9c89abb7ce5df4bf2793f147d3 not found: ID does not exist" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.799590 4926 scope.go:117] "RemoveContainer" containerID="311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b" Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.800007 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b\": container with ID starting with 311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b not found: ID does not exist" containerID="311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.800044 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b"} err="failed to get container status \"311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b\": rpc error: code = NotFound desc = could not find container \"311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b\": container with ID starting with 311ad15f9a544121d463984d7483d85778f79b6901366fbb84b329dcc3882e8b not found: ID does not exist" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.800070 4926 scope.go:117] "RemoveContainer" containerID="bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236" Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.801475 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236\": container with ID starting with bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236 not found: ID does not exist" containerID="bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.801515 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236"} err="failed to get container status \"bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236\": rpc error: code = NotFound desc = could not 
find container \"bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236\": container with ID starting with bde4353eedb846b5dc46734d9edcc099c8410f311f631ea5114bb69c16ac9236 not found: ID does not exist" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.801544 4926 scope.go:117] "RemoveContainer" containerID="d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725" Nov 25 18:34:40 crc kubenswrapper[4926]: E1125 18:34:40.802640 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725\": container with ID starting with d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725 not found: ID does not exist" containerID="d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.802669 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725"} err="failed to get container status \"d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725\": rpc error: code = NotFound desc = could not find container \"d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725\": container with ID starting with d0cbd11e784e420c93de0b8095a456a4f59049e717162b6ea6823d08b0a0d725 not found: ID does not exist" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.809416 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.809463 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrsr7\" (UniqueName: \"kubernetes.io/projected/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-kube-api-access-xrsr7\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.809528 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.809772 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.809873 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.810027 4926 
reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.810045 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qm4qf\" (UniqueName: \"kubernetes.io/projected/438d3925-3d4b-4477-bd84-f7685a322498-kube-api-access-qm4qf\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.810056 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.810066 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.810074 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/438d3925-3d4b-4477-bd84-f7685a322498-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.823478 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-config-data" (OuterVolumeSpecName: "config-data") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.833050 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "438d3925-3d4b-4477-bd84-f7685a322498" (UID: "438d3925-3d4b-4477-bd84-f7685a322498"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.905164 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.910863 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912576 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912636 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912683 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912713 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrsr7\" (UniqueName: \"kubernetes.io/projected/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-kube-api-access-xrsr7\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912776 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912842 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.912859 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438d3925-3d4b-4477-bd84-f7685a322498-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.919413 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.921981 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.924488 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.925629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.933596 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.936059 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.936992 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrsr7\" (UniqueName: \"kubernetes.io/projected/ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae-kube-api-access-xrsr7\") pod \"nova-cell1-novncproxy-0\" (UID: \"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.938509 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.938670 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.938799 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 18:34:40 crc kubenswrapper[4926]: I1125 18:34:40.946104 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014577 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-config-data\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014613 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-scripts\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014630 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-run-httpd\") pod \"ceilometer-0\" (UID: 
\"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014660 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014703 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014730 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnwkb\" (UniqueName: \"kubernetes.io/projected/52d0d555-c16c-4d44-8651-2d799bb55d83-kube-api-access-rnwkb\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.014791 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-log-httpd\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.038443 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117445 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117513 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-config-data\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117548 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-scripts\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117563 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-run-httpd\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117594 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc 
kubenswrapper[4926]: I1125 18:34:41.117648 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnwkb\" (UniqueName: \"kubernetes.io/projected/52d0d555-c16c-4d44-8651-2d799bb55d83-kube-api-access-rnwkb\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.117752 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-log-httpd\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.118186 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-log-httpd\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.121174 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-run-httpd\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.122005 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.124914 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-config-data\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.134804 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-scripts\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.135397 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.138399 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.140198 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnwkb\" (UniqueName: \"kubernetes.io/projected/52d0d555-c16c-4d44-8651-2d799bb55d83-kube-api-access-rnwkb\") pod \"ceilometer-0\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") " pod="openstack/ceilometer-0" Nov 25 18:34:41 crc kubenswrapper[4926]: I1125 18:34:41.375863 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:41.517879 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 18:34:42 crc kubenswrapper[4926]: W1125 18:34:41.552752 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea5ab9dd_7cab_491d_820f_fb35dbe3e2ae.slice/crio-89f64b59e25ff178677b6f51abcbed58b2b8d0705d7b0730292532e5d9bd8e0d WatchSource:0}: Error finding container 89f64b59e25ff178677b6f51abcbed58b2b8d0705d7b0730292532e5d9bd8e0d: Status 404 returned error can't find the container with id 89f64b59e25ff178677b6f51abcbed58b2b8d0705d7b0730292532e5d9bd8e0d Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:41.584532 4926 generic.go:334] "Generic (PLEG): container finished" podID="d25d769e-8387-460f-806f-065854877f2c" containerID="0dbc577f20d1f69c6b3ea088bf40ab4cd1449fa1a59891b1c16cb158a4cdccf8" exitCode=0 Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:41.584592 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" event={"ID":"d25d769e-8387-460f-806f-065854877f2c","Type":"ContainerDied","Data":"0dbc577f20d1f69c6b3ea088bf40ab4cd1449fa1a59891b1c16cb158a4cdccf8"} Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:41.584618 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" event={"ID":"d25d769e-8387-460f-806f-065854877f2c","Type":"ContainerStarted","Data":"305e3f03644a2ee91991fc9f0b83452e2d3d953b3414cdca595977c97759c2b9"} Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:41.585565 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae","Type":"ContainerStarted","Data":"89f64b59e25ff178677b6f51abcbed58b2b8d0705d7b0730292532e5d9bd8e0d"} Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.192638 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.341354 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="438d3925-3d4b-4477-bd84-f7685a322498" path="/var/lib/kubelet/pods/438d3925-3d4b-4477-bd84-f7685a322498/volumes" Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.342473 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef389464-cca4-4177-a709-f13850dc4689" path="/var/lib/kubelet/pods/ef389464-cca4-4177-a709-f13850dc4689/volumes" Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.598938 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" event={"ID":"d25d769e-8387-460f-806f-065854877f2c","Type":"ContainerStarted","Data":"b2237bb72ad9e3d708536c2e505799168709ae436342e22fa31c6853c52e1eb1"} Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.600241 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 
18:34:42.602128 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-log" containerID="cri-o://dc9291bc258f7e85a5539f094c4cd0a41cad6ac855cc51a49cbdd4e72c654ff4" gracePeriod=30 Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.602589 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae","Type":"ContainerStarted","Data":"ccdc7a5c9c643b282673c7406e69c9bc53b6513d63ad4ca1c526e38c5e0a774b"} Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.602621 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-api" containerID="cri-o://26627a60dc1773bd1470d98f4de0381a3324901df83fa84e8f0a2439772867c6" gracePeriod=30 Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.649100 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" podStartSLOduration=3.64908327 podStartE2EDuration="3.64908327s" podCreationTimestamp="2025-11-25 18:34:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:42.623865837 +0000 UTC m=+1313.009379452" watchObservedRunningTime="2025-11-25 18:34:42.64908327 +0000 UTC m=+1313.034596885" Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.653595 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.653582062 podStartE2EDuration="2.653582062s" podCreationTimestamp="2025-11-25 18:34:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:42.64420279 +0000 UTC m=+1313.029716395" watchObservedRunningTime="2025-11-25 18:34:42.653582062 +0000 UTC m=+1313.039095677" Nov 25 18:34:42 crc kubenswrapper[4926]: I1125 18:34:42.989690 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:43 crc kubenswrapper[4926]: I1125 18:34:43.570228 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 18:34:43 crc kubenswrapper[4926]: I1125 18:34:43.613520 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerStarted","Data":"61dc0b664d1a7c1b015537c6ec735c765b520a6414e9bcec5f456efefd42b65f"} Nov 25 18:34:43 crc kubenswrapper[4926]: I1125 18:34:43.613572 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerStarted","Data":"a9829c64e7dcdcbf2003eda1802cbd44e0ff90c96fe4688e95af1be1e0d10f5a"} Nov 25 18:34:43 crc kubenswrapper[4926]: I1125 18:34:43.620776 4926 generic.go:334] "Generic (PLEG): container finished" podID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerID="dc9291bc258f7e85a5539f094c4cd0a41cad6ac855cc51a49cbdd4e72c654ff4" exitCode=143 Nov 25 18:34:43 crc kubenswrapper[4926]: I1125 18:34:43.620853 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dd77a81c-f483-45c2-98a4-f0211de856a6","Type":"ContainerDied","Data":"dc9291bc258f7e85a5539f094c4cd0a41cad6ac855cc51a49cbdd4e72c654ff4"} Nov 25 18:34:44 crc 
kubenswrapper[4926]: I1125 18:34:44.686761 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerStarted","Data":"b9e7540eee3f72741186ca65885f6d660c14d04d0c7b7a74fe91eea59f34beeb"} Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.690830 4926 generic.go:334] "Generic (PLEG): container finished" podID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerID="26627a60dc1773bd1470d98f4de0381a3324901df83fa84e8f0a2439772867c6" exitCode=0 Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.691942 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dd77a81c-f483-45c2-98a4-f0211de856a6","Type":"ContainerDied","Data":"26627a60dc1773bd1470d98f4de0381a3324901df83fa84e8f0a2439772867c6"} Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.910567 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.972844 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd77a81c-f483-45c2-98a4-f0211de856a6-logs\") pod \"dd77a81c-f483-45c2-98a4-f0211de856a6\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.973127 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5jl9\" (UniqueName: \"kubernetes.io/projected/dd77a81c-f483-45c2-98a4-f0211de856a6-kube-api-access-r5jl9\") pod \"dd77a81c-f483-45c2-98a4-f0211de856a6\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.973232 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-combined-ca-bundle\") pod \"dd77a81c-f483-45c2-98a4-f0211de856a6\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.973296 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-config-data\") pod \"dd77a81c-f483-45c2-98a4-f0211de856a6\" (UID: \"dd77a81c-f483-45c2-98a4-f0211de856a6\") " Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.973360 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd77a81c-f483-45c2-98a4-f0211de856a6-logs" (OuterVolumeSpecName: "logs") pod "dd77a81c-f483-45c2-98a4-f0211de856a6" (UID: "dd77a81c-f483-45c2-98a4-f0211de856a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.974107 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd77a81c-f483-45c2-98a4-f0211de856a6-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:44 crc kubenswrapper[4926]: I1125 18:34:44.977264 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd77a81c-f483-45c2-98a4-f0211de856a6-kube-api-access-r5jl9" (OuterVolumeSpecName: "kube-api-access-r5jl9") pod "dd77a81c-f483-45c2-98a4-f0211de856a6" (UID: "dd77a81c-f483-45c2-98a4-f0211de856a6"). InnerVolumeSpecName "kube-api-access-r5jl9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.028361 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-config-data" (OuterVolumeSpecName: "config-data") pod "dd77a81c-f483-45c2-98a4-f0211de856a6" (UID: "dd77a81c-f483-45c2-98a4-f0211de856a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.040336 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd77a81c-f483-45c2-98a4-f0211de856a6" (UID: "dd77a81c-f483-45c2-98a4-f0211de856a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.076899 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5jl9\" (UniqueName: \"kubernetes.io/projected/dd77a81c-f483-45c2-98a4-f0211de856a6-kube-api-access-r5jl9\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.076929 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.076939 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd77a81c-f483-45c2-98a4-f0211de856a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.702450 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.702492 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dd77a81c-f483-45c2-98a4-f0211de856a6","Type":"ContainerDied","Data":"73e3232aa159ddb79801be338297fc61584a7ceb3139636be150235cad24ec66"} Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.702986 4926 scope.go:117] "RemoveContainer" containerID="26627a60dc1773bd1470d98f4de0381a3324901df83fa84e8f0a2439772867c6" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.706183 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerStarted","Data":"de7bef79350ebe9a4a04dc760b6b1276edbb05584dcbd2901a8c29bf16c2e92b"} Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.735512 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.743419 4926 scope.go:117] "RemoveContainer" containerID="dc9291bc258f7e85a5539f094c4cd0a41cad6ac855cc51a49cbdd4e72c654ff4" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.753892 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.774549 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:45 crc kubenswrapper[4926]: E1125 18:34:45.775033 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-log" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.775052 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-log" Nov 25 18:34:45 crc kubenswrapper[4926]: E1125 18:34:45.775067 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-api" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.775075 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-api" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.775248 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-api" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.775285 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" containerName="nova-api-log" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.776366 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.785108 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.785826 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.785995 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.786704 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.816181 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee81242f-11f7-4a0d-83c7-1f97bbf41156-logs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.816473 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.816530 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-config-data\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.816547 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.816575 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzvt5\" (UniqueName: \"kubernetes.io/projected/ee81242f-11f7-4a0d-83c7-1f97bbf41156-kube-api-access-gzvt5\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.816662 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-public-tls-certs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.919522 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee81242f-11f7-4a0d-83c7-1f97bbf41156-logs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.919635 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.919664 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-config-data\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.919683 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.919705 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzvt5\" (UniqueName: \"kubernetes.io/projected/ee81242f-11f7-4a0d-83c7-1f97bbf41156-kube-api-access-gzvt5\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.919747 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-public-tls-certs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.920018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee81242f-11f7-4a0d-83c7-1f97bbf41156-logs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.926598 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-config-data\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.932011 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.932049 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-public-tls-certs\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.935106 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " pod="openstack/nova-api-0" Nov 25 18:34:45 crc kubenswrapper[4926]: I1125 18:34:45.947582 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzvt5\" (UniqueName: \"kubernetes.io/projected/ee81242f-11f7-4a0d-83c7-1f97bbf41156-kube-api-access-gzvt5\") pod \"nova-api-0\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " 
pod="openstack/nova-api-0" Nov 25 18:34:46 crc kubenswrapper[4926]: I1125 18:34:46.041070 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 18:34:46 crc kubenswrapper[4926]: I1125 18:34:46.103230 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:34:46 crc kubenswrapper[4926]: I1125 18:34:46.342270 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd77a81c-f483-45c2-98a4-f0211de856a6" path="/var/lib/kubelet/pods/dd77a81c-f483-45c2-98a4-f0211de856a6/volumes" Nov 25 18:34:46 crc kubenswrapper[4926]: I1125 18:34:46.562964 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:34:46 crc kubenswrapper[4926]: W1125 18:34:46.572222 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee81242f_11f7_4a0d_83c7_1f97bbf41156.slice/crio-464d883aea7a376cf1ea159a1e4afce07faad5819cc9d042a1494fa33812e343 WatchSource:0}: Error finding container 464d883aea7a376cf1ea159a1e4afce07faad5819cc9d042a1494fa33812e343: Status 404 returned error can't find the container with id 464d883aea7a376cf1ea159a1e4afce07faad5819cc9d042a1494fa33812e343 Nov 25 18:34:46 crc kubenswrapper[4926]: I1125 18:34:46.720307 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee81242f-11f7-4a0d-83c7-1f97bbf41156","Type":"ContainerStarted","Data":"464d883aea7a376cf1ea159a1e4afce07faad5819cc9d042a1494fa33812e343"} Nov 25 18:34:46 crc kubenswrapper[4926]: I1125 18:34:46.905284 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.747077 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee81242f-11f7-4a0d-83c7-1f97bbf41156","Type":"ContainerStarted","Data":"4abff4feba10cbe6d2f9e88339bb587e2a5c73a3ddc0898636154d41c420db3c"} Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.747359 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee81242f-11f7-4a0d-83c7-1f97bbf41156","Type":"ContainerStarted","Data":"181ada5d5873ba8e5396a7f12bdcefe968c4b0e5d0c97eb08e1ba12e76f96b89"} Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.751550 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerStarted","Data":"725fdcf542c642ca6d4c45ba3f02084acd5d9448b5377b6cf0eb78a71ea425f1"} Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.751675 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-central-agent" containerID="cri-o://61dc0b664d1a7c1b015537c6ec735c765b520a6414e9bcec5f456efefd42b65f" gracePeriod=30 Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.751854 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.751896 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="proxy-httpd" containerID="cri-o://725fdcf542c642ca6d4c45ba3f02084acd5d9448b5377b6cf0eb78a71ea425f1" gracePeriod=30 Nov 25 18:34:47 crc 
kubenswrapper[4926]: I1125 18:34:47.751933 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="sg-core" containerID="cri-o://de7bef79350ebe9a4a04dc760b6b1276edbb05584dcbd2901a8c29bf16c2e92b" gracePeriod=30 Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.751971 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-notification-agent" containerID="cri-o://b9e7540eee3f72741186ca65885f6d660c14d04d0c7b7a74fe91eea59f34beeb" gracePeriod=30 Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.765764 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.765748717 podStartE2EDuration="2.765748717s" podCreationTimestamp="2025-11-25 18:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:47.763495151 +0000 UTC m=+1318.149008766" watchObservedRunningTime="2025-11-25 18:34:47.765748717 +0000 UTC m=+1318.151262322" Nov 25 18:34:47 crc kubenswrapper[4926]: I1125 18:34:47.787314 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.865948613 podStartE2EDuration="7.787298529s" podCreationTimestamp="2025-11-25 18:34:40 +0000 UTC" firstStartedPulling="2025-11-25 18:34:42.996419587 +0000 UTC m=+1313.381933212" lastFinishedPulling="2025-11-25 18:34:46.917769523 +0000 UTC m=+1317.303283128" observedRunningTime="2025-11-25 18:34:47.781760803 +0000 UTC m=+1318.167274408" watchObservedRunningTime="2025-11-25 18:34:47.787298529 +0000 UTC m=+1318.172812134" Nov 25 18:34:48 crc kubenswrapper[4926]: I1125 18:34:48.767176 4926 generic.go:334] "Generic (PLEG): container finished" podID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerID="725fdcf542c642ca6d4c45ba3f02084acd5d9448b5377b6cf0eb78a71ea425f1" exitCode=0 Nov 25 18:34:48 crc kubenswrapper[4926]: I1125 18:34:48.767557 4926 generic.go:334] "Generic (PLEG): container finished" podID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerID="de7bef79350ebe9a4a04dc760b6b1276edbb05584dcbd2901a8c29bf16c2e92b" exitCode=2 Nov 25 18:34:48 crc kubenswrapper[4926]: I1125 18:34:48.767572 4926 generic.go:334] "Generic (PLEG): container finished" podID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerID="b9e7540eee3f72741186ca65885f6d660c14d04d0c7b7a74fe91eea59f34beeb" exitCode=0 Nov 25 18:34:48 crc kubenswrapper[4926]: I1125 18:34:48.767277 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerDied","Data":"725fdcf542c642ca6d4c45ba3f02084acd5d9448b5377b6cf0eb78a71ea425f1"} Nov 25 18:34:48 crc kubenswrapper[4926]: I1125 18:34:48.767703 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerDied","Data":"de7bef79350ebe9a4a04dc760b6b1276edbb05584dcbd2901a8c29bf16c2e92b"} Nov 25 18:34:48 crc kubenswrapper[4926]: I1125 18:34:48.767737 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerDied","Data":"b9e7540eee3f72741186ca65885f6d660c14d04d0c7b7a74fe91eea59f34beeb"} Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 
18:34:50.122669 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc"
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.218245 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c66c57c99-5w8zh"]
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.218560 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerName="dnsmasq-dns" containerID="cri-o://0f035496039235591f5f6165aa313cf746247336ff4bf426cea037b2b61907c9" gracePeriod=10
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.799829 4926 generic.go:334] "Generic (PLEG): container finished" podID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerID="0f035496039235591f5f6165aa313cf746247336ff4bf426cea037b2b61907c9" exitCode=0
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.800167 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" event={"ID":"1aa441a9-06d8-4eaa-a0e8-ae79280303b0","Type":"ContainerDied","Data":"0f035496039235591f5f6165aa313cf746247336ff4bf426cea037b2b61907c9"}
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.800200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh" event={"ID":"1aa441a9-06d8-4eaa-a0e8-ae79280303b0","Type":"ContainerDied","Data":"19b5c022c30eccddf48d1cacaad26bb0df2ac2d532e452bc9dbe46cde620591b"}
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.800216 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19b5c022c30eccddf48d1cacaad26bb0df2ac2d532e452bc9dbe46cde620591b"
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.832726 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.952358 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-sb\") pod \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") "
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.952466 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-config\") pod \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") "
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.952489 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-svc\") pod \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") "
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.952518 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9flr\" (UniqueName: \"kubernetes.io/projected/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-kube-api-access-b9flr\") pod \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") "
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.952576 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-swift-storage-0\") pod \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") "
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.952619 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-nb\") pod \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\" (UID: \"1aa441a9-06d8-4eaa-a0e8-ae79280303b0\") "
Nov 25 18:34:50 crc kubenswrapper[4926]: I1125 18:34:50.959597 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-kube-api-access-b9flr" (OuterVolumeSpecName: "kube-api-access-b9flr") pod "1aa441a9-06d8-4eaa-a0e8-ae79280303b0" (UID: "1aa441a9-06d8-4eaa-a0e8-ae79280303b0"). InnerVolumeSpecName "kube-api-access-b9flr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.011165 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1aa441a9-06d8-4eaa-a0e8-ae79280303b0" (UID: "1aa441a9-06d8-4eaa-a0e8-ae79280303b0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.020979 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1aa441a9-06d8-4eaa-a0e8-ae79280303b0" (UID: "1aa441a9-06d8-4eaa-a0e8-ae79280303b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.029963 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1aa441a9-06d8-4eaa-a0e8-ae79280303b0" (UID: "1aa441a9-06d8-4eaa-a0e8-ae79280303b0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.036678 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1aa441a9-06d8-4eaa-a0e8-ae79280303b0" (UID: "1aa441a9-06d8-4eaa-a0e8-ae79280303b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.038836 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.040472 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-config" (OuterVolumeSpecName: "config") pod "1aa441a9-06d8-4eaa-a0e8-ae79280303b0" (UID: "1aa441a9-06d8-4eaa-a0e8-ae79280303b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.054854 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.054880 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-config\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.054911 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.054921 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9flr\" (UniqueName: \"kubernetes.io/projected/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-kube-api-access-b9flr\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.054929 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.054937 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1aa441a9-06d8-4eaa-a0e8-ae79280303b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.059076 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.814709 4926 generic.go:334] "Generic (PLEG): container finished" podID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerID="61dc0b664d1a7c1b015537c6ec735c765b520a6414e9bcec5f456efefd42b65f" exitCode=0
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.814789 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerDied","Data":"61dc0b664d1a7c1b015537c6ec735c765b520a6414e9bcec5f456efefd42b65f"}
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.815254 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c66c57c99-5w8zh"
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.839182 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.857900 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c66c57c99-5w8zh"]
Nov 25 18:34:51 crc kubenswrapper[4926]: I1125 18:34:51.870577 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c66c57c99-5w8zh"]
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.034035 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-f2gmd"]
Nov 25 18:34:52 crc kubenswrapper[4926]: E1125 18:34:52.037802 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerName="init"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.037837 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerName="init"
Nov 25 18:34:52 crc kubenswrapper[4926]: E1125 18:34:52.037857 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerName="dnsmasq-dns"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.037866 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerName="dnsmasq-dns"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.038109 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" containerName="dnsmasq-dns"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.038958 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.042221 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.042517 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.052497 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-f2gmd"]
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.059653 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.182996 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-config-data\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.184712 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-run-httpd\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.184779 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-combined-ca-bundle\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.184825 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-ceilometer-tls-certs\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.184866 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnwkb\" (UniqueName: \"kubernetes.io/projected/52d0d555-c16c-4d44-8651-2d799bb55d83-kube-api-access-rnwkb\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.184893 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-sg-core-conf-yaml\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.184937 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-scripts\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.185057 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-log-httpd\") pod \"52d0d555-c16c-4d44-8651-2d799bb55d83\" (UID: \"52d0d555-c16c-4d44-8651-2d799bb55d83\") "
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.185426 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.185657 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-scripts\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.185790 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-config-data\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.185831 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sqts\" (UniqueName: \"kubernetes.io/projected/113b163c-c6d9-498d-9743-a53689445970-kube-api-access-8sqts\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.186469 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.186695 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.191574 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52d0d555-c16c-4d44-8651-2d799bb55d83-kube-api-access-rnwkb" (OuterVolumeSpecName: "kube-api-access-rnwkb") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "kube-api-access-rnwkb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.196517 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-scripts" (OuterVolumeSpecName: "scripts") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.230501 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.245675 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.285976 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288164 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-scripts\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288216 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-config-data\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288244 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sqts\" (UniqueName: \"kubernetes.io/projected/113b163c-c6d9-498d-9743-a53689445970-kube-api-access-8sqts\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288321 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288336 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288350 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288362 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnwkb\" (UniqueName: \"kubernetes.io/projected/52d0d555-c16c-4d44-8651-2d799bb55d83-kube-api-access-rnwkb\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288390 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288400 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.288413 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/52d0d555-c16c-4d44-8651-2d799bb55d83-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.292822 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-config-data\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.292988 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.293639 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-scripts\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.312240 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sqts\" (UniqueName: \"kubernetes.io/projected/113b163c-c6d9-498d-9743-a53689445970-kube-api-access-8sqts\") pod \"nova-cell1-cell-mapping-f2gmd\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") " pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.317668 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-config-data" (OuterVolumeSpecName: "config-data") pod "52d0d555-c16c-4d44-8651-2d799bb55d83" (UID: "52d0d555-c16c-4d44-8651-2d799bb55d83"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.344031 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aa441a9-06d8-4eaa-a0e8-ae79280303b0" path="/var/lib/kubelet/pods/1aa441a9-06d8-4eaa-a0e8-ae79280303b0/volumes"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.370797 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.394065 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d0d555-c16c-4d44-8651-2d799bb55d83-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.831193 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"52d0d555-c16c-4d44-8651-2d799bb55d83","Type":"ContainerDied","Data":"a9829c64e7dcdcbf2003eda1802cbd44e0ff90c96fe4688e95af1be1e0d10f5a"}
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.831254 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.831696 4926 scope.go:117] "RemoveContainer" containerID="725fdcf542c642ca6d4c45ba3f02084acd5d9448b5377b6cf0eb78a71ea425f1"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.865425 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.869556 4926 scope.go:117] "RemoveContainer" containerID="de7bef79350ebe9a4a04dc760b6b1276edbb05584dcbd2901a8c29bf16c2e92b"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.884078 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.895123 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:34:52 crc kubenswrapper[4926]: E1125 18:34:52.895611 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-notification-agent"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.895629 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-notification-agent"
Nov 25 18:34:52 crc kubenswrapper[4926]: E1125 18:34:52.895638 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="sg-core"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.895644 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="sg-core"
Nov 25 18:34:52 crc kubenswrapper[4926]: E1125 18:34:52.895651 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="proxy-httpd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.895657 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="proxy-httpd"
Nov 25 18:34:52 crc kubenswrapper[4926]: E1125 18:34:52.895673 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-central-agent"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.895679 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-central-agent"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.896611 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-notification-agent"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.896651 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="sg-core"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.896674 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="ceilometer-central-agent"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.896685 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" containerName="proxy-httpd"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.898636 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.900908 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.906582 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.906839 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.934586 4926 scope.go:117] "RemoveContainer" containerID="b9e7540eee3f72741186ca65885f6d660c14d04d0c7b7a74fe91eea59f34beeb"
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.951590 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-f2gmd"]
Nov 25 18:34:52 crc kubenswrapper[4926]: I1125 18:34:52.952705 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.007863 4926 scope.go:117] "RemoveContainer" containerID="61dc0b664d1a7c1b015537c6ec735c765b520a6414e9bcec5f456efefd42b65f"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.033810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.033934 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxj4t\" (UniqueName: \"kubernetes.io/projected/e92da70d-89aa-4e1f-9961-3cb2334fc573-kube-api-access-fxj4t\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.034053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-log-httpd\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.034117 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.034152 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-run-httpd\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.034216 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-scripts\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.034252 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-config-data\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.034298 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136255 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-log-httpd\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136330 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136354 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-run-httpd\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136388 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-scripts\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136418 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-config-data\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136445 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136476 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136508 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxj4t\" (UniqueName: \"kubernetes.io/projected/e92da70d-89aa-4e1f-9961-3cb2334fc573-kube-api-access-fxj4t\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.136709 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-log-httpd\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.137283 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-run-httpd\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.140302 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.140813 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.141744 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-config-data\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.142512 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-scripts\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.143755 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.153960 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxj4t\" (UniqueName: \"kubernetes.io/projected/e92da70d-89aa-4e1f-9961-3cb2334fc573-kube-api-access-fxj4t\") pod \"ceilometer-0\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") " pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.226789 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.852851 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f2gmd" event={"ID":"113b163c-c6d9-498d-9743-a53689445970","Type":"ContainerStarted","Data":"bf6cc4fa410ede7fbd50da255fa8af3773eebdb944008e53458ff7070075fbfe"}
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.853503 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f2gmd" event={"ID":"113b163c-c6d9-498d-9743-a53689445970","Type":"ContainerStarted","Data":"6429f89c3895a113493513b88d8607453cc171e6907399177036a62a9ded6484"}
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.886791 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-f2gmd" podStartSLOduration=1.8867540329999999 podStartE2EDuration="1.886754033s" podCreationTimestamp="2025-11-25 18:34:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:34:53.872783138 +0000 UTC m=+1324.258296773" watchObservedRunningTime="2025-11-25 18:34:53.886754033 +0000 UTC m=+1324.272267668"
Nov 25 18:34:53 crc kubenswrapper[4926]: I1125 18:34:53.918682 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 18:34:54 crc kubenswrapper[4926]: I1125 18:34:54.348599 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52d0d555-c16c-4d44-8651-2d799bb55d83" path="/var/lib/kubelet/pods/52d0d555-c16c-4d44-8651-2d799bb55d83/volumes"
Nov 25 18:34:54 crc kubenswrapper[4926]: I1125 18:34:54.866516 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"}
Nov 25 18:34:54 crc kubenswrapper[4926]: I1125 18:34:54.866828 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"40b3aa50db332d427a84aa93a155df94b23951f83d0bc8e8aa18b22c3b3bc76a"}
Nov 25 18:34:54 crc kubenswrapper[4926]: I1125 18:34:54.866837 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"522103df9fee15e565cc30348a25626ac7c4efe1b996e56c6a692d4a2093834b"}
Nov 25 18:34:56 crc kubenswrapper[4926]: I1125 18:34:56.104695 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 18:34:56 crc kubenswrapper[4926]: I1125 18:34:56.105320 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 18:34:56 crc kubenswrapper[4926]: I1125 18:34:56.887133 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad"}
Nov 25 18:34:57 crc kubenswrapper[4926]: I1125 18:34:57.125604 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.223:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:34:57 crc kubenswrapper[4926]: I1125 18:34:57.126020 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.223:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 18:34:57 crc kubenswrapper[4926]: I1125 18:34:57.899524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569"}
Nov 25 18:34:57 crc kubenswrapper[4926]: I1125 18:34:57.901257 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 18:34:57 crc kubenswrapper[4926]: I1125 18:34:57.935312 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.649557581 podStartE2EDuration="5.935290743s" podCreationTimestamp="2025-11-25 18:34:52 +0000 UTC" firstStartedPulling="2025-11-25 18:34:53.936207355 +0000 UTC m=+1324.321720960" lastFinishedPulling="2025-11-25 18:34:57.221940497 +0000 UTC m=+1327.607454122" observedRunningTime="2025-11-25 18:34:57.932036142 +0000 UTC m=+1328.317549777" watchObservedRunningTime="2025-11-25 18:34:57.935290743 +0000 UTC m=+1328.320804358"
Nov 25 18:34:58 crc kubenswrapper[4926]: I1125 18:34:58.911487 4926 generic.go:334] "Generic (PLEG): container finished" podID="113b163c-c6d9-498d-9743-a53689445970" containerID="bf6cc4fa410ede7fbd50da255fa8af3773eebdb944008e53458ff7070075fbfe" exitCode=0
Nov 25 18:34:58 crc kubenswrapper[4926]: I1125 18:34:58.911502 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f2gmd" event={"ID":"113b163c-c6d9-498d-9743-a53689445970","Type":"ContainerDied","Data":"bf6cc4fa410ede7fbd50da255fa8af3773eebdb944008e53458ff7070075fbfe"}
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.390640 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.420249 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-scripts\") pod \"113b163c-c6d9-498d-9743-a53689445970\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") "
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.420360 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-config-data\") pod \"113b163c-c6d9-498d-9743-a53689445970\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") "
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.420441 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sqts\" (UniqueName: \"kubernetes.io/projected/113b163c-c6d9-498d-9743-a53689445970-kube-api-access-8sqts\") pod \"113b163c-c6d9-498d-9743-a53689445970\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") "
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.420465 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-combined-ca-bundle\") pod \"113b163c-c6d9-498d-9743-a53689445970\" (UID: \"113b163c-c6d9-498d-9743-a53689445970\") "
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.427366 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/113b163c-c6d9-498d-9743-a53689445970-kube-api-access-8sqts" (OuterVolumeSpecName: "kube-api-access-8sqts") pod "113b163c-c6d9-498d-9743-a53689445970" (UID: "113b163c-c6d9-498d-9743-a53689445970"). InnerVolumeSpecName "kube-api-access-8sqts". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.445341 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-scripts" (OuterVolumeSpecName: "scripts") pod "113b163c-c6d9-498d-9743-a53689445970" (UID: "113b163c-c6d9-498d-9743-a53689445970"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.522687 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sqts\" (UniqueName: \"kubernetes.io/projected/113b163c-c6d9-498d-9743-a53689445970-kube-api-access-8sqts\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.523023 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.622179 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-config-data" (OuterVolumeSpecName: "config-data") pod "113b163c-c6d9-498d-9743-a53689445970" (UID: "113b163c-c6d9-498d-9743-a53689445970"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.622199 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "113b163c-c6d9-498d-9743-a53689445970" (UID: "113b163c-c6d9-498d-9743-a53689445970"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.624802 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.624843 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/113b163c-c6d9-498d-9743-a53689445970-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.937105 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f2gmd" event={"ID":"113b163c-c6d9-498d-9743-a53689445970","Type":"ContainerDied","Data":"6429f89c3895a113493513b88d8607453cc171e6907399177036a62a9ded6484"}
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.937155 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6429f89c3895a113493513b88d8607453cc171e6907399177036a62a9ded6484"
Nov 25 18:35:00 crc kubenswrapper[4926]: I1125 18:35:00.937193 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f2gmd"
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.147357 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.147987 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-api" containerID="cri-o://4abff4feba10cbe6d2f9e88339bb587e2a5c73a3ddc0898636154d41c420db3c" gracePeriod=30
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.148578 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-log" containerID="cri-o://181ada5d5873ba8e5396a7f12bdcefe968c4b0e5d0c97eb08e1ba12e76f96b89" gracePeriod=30
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.158590 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.158882 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="83dbed29-b873-4c13-8dd2-82577a28bb81" containerName="nova-scheduler-scheduler" containerID="cri-o://1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" gracePeriod=30
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.197355 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.197814 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-metadata" containerID="cri-o://8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776" gracePeriod=30
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.197654 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-log" containerID="cri-o://5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028" gracePeriod=30
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.948581 4926 generic.go:334] "Generic (PLEG): container finished" podID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerID="5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028" exitCode=143
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.948644 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63dc318-bb0d-492a-b59a-1944cc047b83","Type":"ContainerDied","Data":"5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028"}
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.950889 4926 generic.go:334] "Generic (PLEG): container finished" podID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerID="181ada5d5873ba8e5396a7f12bdcefe968c4b0e5d0c97eb08e1ba12e76f96b89" exitCode=143
Nov 25 18:35:01 crc kubenswrapper[4926]: I1125 18:35:01.950920 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee81242f-11f7-4a0d-83c7-1f97bbf41156","Type":"ContainerDied","Data":"181ada5d5873ba8e5396a7f12bdcefe968c4b0e5d0c97eb08e1ba12e76f96b89"}
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.508841 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.572570 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-combined-ca-bundle\") pod \"d63dc318-bb0d-492a-b59a-1944cc047b83\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") "
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.572745 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-nova-metadata-tls-certs\") pod \"d63dc318-bb0d-492a-b59a-1944cc047b83\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") "
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.572819 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63dc318-bb0d-492a-b59a-1944cc047b83-logs\") pod \"d63dc318-bb0d-492a-b59a-1944cc047b83\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") "
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.572854 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-config-data\") pod \"d63dc318-bb0d-492a-b59a-1944cc047b83\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") "
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.572876 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cn5bc\" (UniqueName: \"kubernetes.io/projected/d63dc318-bb0d-492a-b59a-1944cc047b83-kube-api-access-cn5bc\") pod \"d63dc318-bb0d-492a-b59a-1944cc047b83\" (UID: \"d63dc318-bb0d-492a-b59a-1944cc047b83\") "
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.574691 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d63dc318-bb0d-492a-b59a-1944cc047b83-logs" (OuterVolumeSpecName: "logs") pod "d63dc318-bb0d-492a-b59a-1944cc047b83" (UID: "d63dc318-bb0d-492a-b59a-1944cc047b83"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.582733 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d63dc318-bb0d-492a-b59a-1944cc047b83-kube-api-access-cn5bc" (OuterVolumeSpecName: "kube-api-access-cn5bc") pod "d63dc318-bb0d-492a-b59a-1944cc047b83" (UID: "d63dc318-bb0d-492a-b59a-1944cc047b83"). InnerVolumeSpecName "kube-api-access-cn5bc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.625640 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-config-data" (OuterVolumeSpecName: "config-data") pod "d63dc318-bb0d-492a-b59a-1944cc047b83" (UID: "d63dc318-bb0d-492a-b59a-1944cc047b83"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.639700 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d63dc318-bb0d-492a-b59a-1944cc047b83" (UID: "d63dc318-bb0d-492a-b59a-1944cc047b83"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.663464 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d63dc318-bb0d-492a-b59a-1944cc047b83" (UID: "d63dc318-bb0d-492a-b59a-1944cc047b83"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.674767 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.674804 4926 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.674817 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63dc318-bb0d-492a-b59a-1944cc047b83-logs\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.674829 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63dc318-bb0d-492a-b59a-1944cc047b83-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.674838 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cn5bc\" (UniqueName: \"kubernetes.io/projected/d63dc318-bb0d-492a-b59a-1944cc047b83-kube-api-access-cn5bc\") on node \"crc\" DevicePath \"\""
Nov 25 18:35:02 crc kubenswrapper[4926]: E1125 18:35:02.747081 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 25 18:35:02 crc kubenswrapper[4926]: E1125 18:35:02.749247 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 25 18:35:02 crc kubenswrapper[4926]: E1125 18:35:02.750633 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 25 18:35:02 crc kubenswrapper[4926]: E1125 18:35:02.750679 4926 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="83dbed29-b873-4c13-8dd2-82577a28bb81" containerName="nova-scheduler-scheduler"
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.965973 4926 generic.go:334] "Generic (PLEG): container finished" podID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerID="8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776" exitCode=0
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.966070 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63dc318-bb0d-492a-b59a-1944cc047b83","Type":"ContainerDied","Data":"8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776"}
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.966102 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63dc318-bb0d-492a-b59a-1944cc047b83","Type":"ContainerDied","Data":"de82a896b70c92ea16aa04ed0cddce5c874ab558629314d4074682089a2be9eb"}
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.966122 4926 scope.go:117] "RemoveContainer" containerID="8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776"
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.966237 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.972464 4926 generic.go:334] "Generic (PLEG): container finished" podID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerID="4abff4feba10cbe6d2f9e88339bb587e2a5c73a3ddc0898636154d41c420db3c" exitCode=0
Nov 25 18:35:02 crc kubenswrapper[4926]: I1125 18:35:02.972500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee81242f-11f7-4a0d-83c7-1f97bbf41156","Type":"ContainerDied","Data":"4abff4feba10cbe6d2f9e88339bb587e2a5c73a3ddc0898636154d41c420db3c"}
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.000145 4926 scope.go:117] "RemoveContainer" containerID="5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.011921 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.038926 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.045053 4926 scope.go:117] "RemoveContainer" containerID="8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776"
Nov 25 18:35:03 crc kubenswrapper[4926]: E1125 18:35:03.049881 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776\": container with ID starting with 8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776 not found: ID does not exist" containerID="8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.049930 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776"} err="failed to get container status \"8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776\": rpc error: code = NotFound desc = could not find container \"8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776\": container with ID starting with 8e3504188c888878d4f529e9e223cfe7f7ac12b45f789d40113da19c99f73776 not found: ID does not exist"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.049958 4926 scope.go:117] "RemoveContainer" containerID="5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028"
Nov 25 18:35:03 crc kubenswrapper[4926]: E1125 18:35:03.050308 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028\": container with ID starting with 5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028 not found: ID does not exist" containerID="5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.050355 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028"} err="failed to get container status \"5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028\": rpc error: code = NotFound desc = could not find container \"5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028\": container with ID starting with 5d6164a6817c7842d2a7416d4638daf7ed9206a3654a26d6b3b0910927b33028 not found: ID does not exist"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.056313 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:35:03 crc kubenswrapper[4926]: E1125 18:35:03.056843 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-log"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.056863 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-log"
Nov 25 18:35:03 crc kubenswrapper[4926]: E1125 18:35:03.056879 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-metadata"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.056886 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-metadata"
Nov 25 18:35:03 crc kubenswrapper[4926]: E1125 18:35:03.056927 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="113b163c-c6d9-498d-9743-a53689445970" containerName="nova-manage"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.056933 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="113b163c-c6d9-498d-9743-a53689445970" containerName="nova-manage"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.057146 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="113b163c-c6d9-498d-9743-a53689445970" containerName="nova-manage"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.057171 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-log"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.057190 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" containerName="nova-metadata-metadata"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.058348 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.065587 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.065885 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.071089 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.083157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-546sx\" (UniqueName: \"kubernetes.io/projected/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-kube-api-access-546sx\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.083257 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-config-data\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.083362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.083424 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-logs\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.083450 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.185696 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-546sx\" (UniqueName: \"kubernetes.io/projected/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-kube-api-access-546sx\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.185774 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-config-data\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0"
Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.185932 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") "
pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.186109 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-logs\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.186176 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.186907 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-logs\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.190470 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-config-data\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.190534 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.192064 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.203979 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-546sx\" (UniqueName: \"kubernetes.io/projected/9b3135e8-5795-4aaf-9c02-10dc929bb4a3-kube-api-access-546sx\") pod \"nova-metadata-0\" (UID: \"9b3135e8-5795-4aaf-9c02-10dc929bb4a3\") " pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.245741 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.287618 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-combined-ca-bundle\") pod \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.287782 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzvt5\" (UniqueName: \"kubernetes.io/projected/ee81242f-11f7-4a0d-83c7-1f97bbf41156-kube-api-access-gzvt5\") pod \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.287834 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-config-data\") pod \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.287885 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-public-tls-certs\") pod \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.287920 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee81242f-11f7-4a0d-83c7-1f97bbf41156-logs\") pod \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.287957 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-internal-tls-certs\") pod \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\" (UID: \"ee81242f-11f7-4a0d-83c7-1f97bbf41156\") " Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.288681 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee81242f-11f7-4a0d-83c7-1f97bbf41156-logs" (OuterVolumeSpecName: "logs") pod "ee81242f-11f7-4a0d-83c7-1f97bbf41156" (UID: "ee81242f-11f7-4a0d-83c7-1f97bbf41156"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.296607 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee81242f-11f7-4a0d-83c7-1f97bbf41156-kube-api-access-gzvt5" (OuterVolumeSpecName: "kube-api-access-gzvt5") pod "ee81242f-11f7-4a0d-83c7-1f97bbf41156" (UID: "ee81242f-11f7-4a0d-83c7-1f97bbf41156"). InnerVolumeSpecName "kube-api-access-gzvt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.321225 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee81242f-11f7-4a0d-83c7-1f97bbf41156" (UID: "ee81242f-11f7-4a0d-83c7-1f97bbf41156"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.323353 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-config-data" (OuterVolumeSpecName: "config-data") pod "ee81242f-11f7-4a0d-83c7-1f97bbf41156" (UID: "ee81242f-11f7-4a0d-83c7-1f97bbf41156"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.346015 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ee81242f-11f7-4a0d-83c7-1f97bbf41156" (UID: "ee81242f-11f7-4a0d-83c7-1f97bbf41156"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.352039 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ee81242f-11f7-4a0d-83c7-1f97bbf41156" (UID: "ee81242f-11f7-4a0d-83c7-1f97bbf41156"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.390672 4926 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.390735 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee81242f-11f7-4a0d-83c7-1f97bbf41156-logs\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.390747 4926 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.390759 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.390769 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzvt5\" (UniqueName: \"kubernetes.io/projected/ee81242f-11f7-4a0d-83c7-1f97bbf41156-kube-api-access-gzvt5\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.390783 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee81242f-11f7-4a0d-83c7-1f97bbf41156-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.396909 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.542418 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.542834 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.863938 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.989706 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee81242f-11f7-4a0d-83c7-1f97bbf41156","Type":"ContainerDied","Data":"464d883aea7a376cf1ea159a1e4afce07faad5819cc9d042a1494fa33812e343"} Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.990107 4926 scope.go:117] "RemoveContainer" containerID="4abff4feba10cbe6d2f9e88339bb587e2a5c73a3ddc0898636154d41c420db3c" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.989768 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:35:03 crc kubenswrapper[4926]: I1125 18:35:03.996911 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9b3135e8-5795-4aaf-9c02-10dc929bb4a3","Type":"ContainerStarted","Data":"4fba00af3da4237dd3b1a17c3773aa2f46b3726a42002a6ccf1ae081c2528392"} Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.052773 4926 scope.go:117] "RemoveContainer" containerID="181ada5d5873ba8e5396a7f12bdcefe968c4b0e5d0c97eb08e1ba12e76f96b89" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.053902 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.076385 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.102442 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 18:35:04 crc kubenswrapper[4926]: E1125 18:35:04.102854 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-api" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.102871 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-api" Nov 25 18:35:04 crc kubenswrapper[4926]: E1125 18:35:04.102905 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-log" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.102913 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-log" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.103071 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-log" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 
18:35:04.103096 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" containerName="nova-api-api" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.104155 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.106458 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.107477 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.107674 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.110888 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.206903 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da257cb5-92cf-405d-b671-ed2123802153-logs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.207167 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-internal-tls-certs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.207246 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-config-data\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.207373 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjsbk\" (UniqueName: \"kubernetes.io/projected/da257cb5-92cf-405d-b671-ed2123802153-kube-api-access-bjsbk\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.207497 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-public-tls-certs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.207571 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.309330 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjsbk\" (UniqueName: \"kubernetes.io/projected/da257cb5-92cf-405d-b671-ed2123802153-kube-api-access-bjsbk\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " 
pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.309377 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-public-tls-certs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.309419 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.309502 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da257cb5-92cf-405d-b671-ed2123802153-logs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.309537 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-internal-tls-certs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.309555 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-config-data\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.310130 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da257cb5-92cf-405d-b671-ed2123802153-logs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.314961 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-internal-tls-certs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.317888 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-config-data\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.327882 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.327992 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da257cb5-92cf-405d-b671-ed2123802153-public-tls-certs\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.328946 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjsbk\" (UniqueName: \"kubernetes.io/projected/da257cb5-92cf-405d-b671-ed2123802153-kube-api-access-bjsbk\") pod \"nova-api-0\" (UID: \"da257cb5-92cf-405d-b671-ed2123802153\") " pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.341717 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d63dc318-bb0d-492a-b59a-1944cc047b83" path="/var/lib/kubelet/pods/d63dc318-bb0d-492a-b59a-1944cc047b83/volumes" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.342521 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee81242f-11f7-4a0d-83c7-1f97bbf41156" path="/var/lib/kubelet/pods/ee81242f-11f7-4a0d-83c7-1f97bbf41156/volumes" Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.430122 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 18:35:04 crc kubenswrapper[4926]: W1125 18:35:04.924220 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda257cb5_92cf_405d_b671_ed2123802153.slice/crio-80c538120fecf224e101281618d98921b0c86ca7c871876d45eaa3c8299dcdc7 WatchSource:0}: Error finding container 80c538120fecf224e101281618d98921b0c86ca7c871876d45eaa3c8299dcdc7: Status 404 returned error can't find the container with id 80c538120fecf224e101281618d98921b0c86ca7c871876d45eaa3c8299dcdc7 Nov 25 18:35:04 crc kubenswrapper[4926]: I1125 18:35:04.929095 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 18:35:05 crc kubenswrapper[4926]: I1125 18:35:05.010482 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da257cb5-92cf-405d-b671-ed2123802153","Type":"ContainerStarted","Data":"80c538120fecf224e101281618d98921b0c86ca7c871876d45eaa3c8299dcdc7"} Nov 25 18:35:05 crc kubenswrapper[4926]: I1125 18:35:05.018813 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9b3135e8-5795-4aaf-9c02-10dc929bb4a3","Type":"ContainerStarted","Data":"9b95be1bbb323f8fabaeee9db2f03b495a369118a7c8f6ce595750942bfee40d"} Nov 25 18:35:05 crc kubenswrapper[4926]: I1125 18:35:05.018873 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9b3135e8-5795-4aaf-9c02-10dc929bb4a3","Type":"ContainerStarted","Data":"21e138f7350b65ede9e0ba187383823c084fbbb6256c3e35d01fd4e326c53e7c"} Nov 25 18:35:05 crc kubenswrapper[4926]: I1125 18:35:05.056110 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.056085447 podStartE2EDuration="2.056085447s" podCreationTimestamp="2025-11-25 18:35:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:35:05.040427419 +0000 UTC m=+1335.425941034" watchObservedRunningTime="2025-11-25 18:35:05.056085447 +0000 UTC m=+1335.441599082" Nov 25 18:35:06 crc kubenswrapper[4926]: I1125 18:35:06.033568 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da257cb5-92cf-405d-b671-ed2123802153","Type":"ContainerStarted","Data":"67fdd10e419d884f5fba51dfd60b3acb45d504b6e60886ca71417a6d397311cc"} Nov 25 18:35:06 crc kubenswrapper[4926]: I1125 18:35:06.034199 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"da257cb5-92cf-405d-b671-ed2123802153","Type":"ContainerStarted","Data":"f2399fc343fc7a75c53b0820d49446a5678facd6943049b1f05be1f368f217d3"} Nov 25 18:35:06 crc kubenswrapper[4926]: I1125 18:35:06.062449 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.062429345 podStartE2EDuration="2.062429345s" podCreationTimestamp="2025-11-25 18:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:35:06.051521456 +0000 UTC m=+1336.437035081" watchObservedRunningTime="2025-11-25 18:35:06.062429345 +0000 UTC m=+1336.447942950" Nov 25 18:35:06 crc kubenswrapper[4926]: I1125 18:35:06.959771 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.047125 4926 generic.go:334] "Generic (PLEG): container finished" podID="83dbed29-b873-4c13-8dd2-82577a28bb81" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" exitCode=0 Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.047215 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.047236 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83dbed29-b873-4c13-8dd2-82577a28bb81","Type":"ContainerDied","Data":"1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e"} Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.047534 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83dbed29-b873-4c13-8dd2-82577a28bb81","Type":"ContainerDied","Data":"c9d5e9e2ac13ae9d6c9b4cc467c7f78347901ef8bfdb4921cd1bc30d65f96fe1"} Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.047571 4926 scope.go:117] "RemoveContainer" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.078712 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-config-data\") pod \"83dbed29-b873-4c13-8dd2-82577a28bb81\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.078831 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-combined-ca-bundle\") pod \"83dbed29-b873-4c13-8dd2-82577a28bb81\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.078967 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgdjq\" (UniqueName: \"kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq\") pod \"83dbed29-b873-4c13-8dd2-82577a28bb81\" (UID: \"83dbed29-b873-4c13-8dd2-82577a28bb81\") " Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.100191 4926 scope.go:117] "RemoveContainer" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" Nov 25 18:35:07 crc kubenswrapper[4926]: E1125 18:35:07.100624 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e\": container with ID starting with 1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e not found: ID does not exist" containerID="1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.100667 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e"} err="failed to get container status \"1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e\": rpc error: code = NotFound desc = could not find container \"1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e\": container with ID starting with 1d3a2b448c21c95d8a909a2d49588f78fd4a174bd14b5504bfb982df0342177e not found: ID does not exist" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.101526 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq" (OuterVolumeSpecName: "kube-api-access-vgdjq") pod "83dbed29-b873-4c13-8dd2-82577a28bb81" (UID: "83dbed29-b873-4c13-8dd2-82577a28bb81"). InnerVolumeSpecName "kube-api-access-vgdjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.110427 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83dbed29-b873-4c13-8dd2-82577a28bb81" (UID: "83dbed29-b873-4c13-8dd2-82577a28bb81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.118944 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-config-data" (OuterVolumeSpecName: "config-data") pod "83dbed29-b873-4c13-8dd2-82577a28bb81" (UID: "83dbed29-b873-4c13-8dd2-82577a28bb81"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.181401 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.181436 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83dbed29-b873-4c13-8dd2-82577a28bb81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.181446 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgdjq\" (UniqueName: \"kubernetes.io/projected/83dbed29-b873-4c13-8dd2-82577a28bb81-kube-api-access-vgdjq\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.389633 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.401264 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.435480 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:35:07 crc kubenswrapper[4926]: E1125 18:35:07.437247 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83dbed29-b873-4c13-8dd2-82577a28bb81" containerName="nova-scheduler-scheduler" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.437270 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="83dbed29-b873-4c13-8dd2-82577a28bb81" containerName="nova-scheduler-scheduler" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.438471 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="83dbed29-b873-4c13-8dd2-82577a28bb81" containerName="nova-scheduler-scheduler" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.440878 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.447405 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.449910 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.589554 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33cc50f8-8d20-4fa6-a697-2e508b70f929-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.589835 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33cc50f8-8d20-4fa6-a697-2e508b70f929-config-data\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.590188 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz9rk\" (UniqueName: \"kubernetes.io/projected/33cc50f8-8d20-4fa6-a697-2e508b70f929-kube-api-access-gz9rk\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.692719 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33cc50f8-8d20-4fa6-a697-2e508b70f929-config-data\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.692847 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz9rk\" (UniqueName: \"kubernetes.io/projected/33cc50f8-8d20-4fa6-a697-2e508b70f929-kube-api-access-gz9rk\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.692911 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33cc50f8-8d20-4fa6-a697-2e508b70f929-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.700093 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33cc50f8-8d20-4fa6-a697-2e508b70f929-config-data\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.701026 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33cc50f8-8d20-4fa6-a697-2e508b70f929-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.710536 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz9rk\" (UniqueName: 
\"kubernetes.io/projected/33cc50f8-8d20-4fa6-a697-2e508b70f929-kube-api-access-gz9rk\") pod \"nova-scheduler-0\" (UID: \"33cc50f8-8d20-4fa6-a697-2e508b70f929\") " pod="openstack/nova-scheduler-0" Nov 25 18:35:07 crc kubenswrapper[4926]: I1125 18:35:07.767398 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 18:35:08 crc kubenswrapper[4926]: I1125 18:35:08.098323 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 18:35:08 crc kubenswrapper[4926]: W1125 18:35:08.103633 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33cc50f8_8d20_4fa6_a697_2e508b70f929.slice/crio-aab585a951780dc821ed48c4d75f72e6d35e8b20b56f627ce5b9d9b49ef5054e WatchSource:0}: Error finding container aab585a951780dc821ed48c4d75f72e6d35e8b20b56f627ce5b9d9b49ef5054e: Status 404 returned error can't find the container with id aab585a951780dc821ed48c4d75f72e6d35e8b20b56f627ce5b9d9b49ef5054e Nov 25 18:35:08 crc kubenswrapper[4926]: I1125 18:35:08.341647 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83dbed29-b873-4c13-8dd2-82577a28bb81" path="/var/lib/kubelet/pods/83dbed29-b873-4c13-8dd2-82577a28bb81/volumes" Nov 25 18:35:08 crc kubenswrapper[4926]: I1125 18:35:08.397954 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 18:35:08 crc kubenswrapper[4926]: I1125 18:35:08.399448 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 18:35:09 crc kubenswrapper[4926]: I1125 18:35:09.077361 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"33cc50f8-8d20-4fa6-a697-2e508b70f929","Type":"ContainerStarted","Data":"fb02fe4f6840c1298c3ec7e8f4f2cf70de98d7be1fde80fca0dc262422894344"} Nov 25 18:35:09 crc kubenswrapper[4926]: I1125 18:35:09.078092 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"33cc50f8-8d20-4fa6-a697-2e508b70f929","Type":"ContainerStarted","Data":"aab585a951780dc821ed48c4d75f72e6d35e8b20b56f627ce5b9d9b49ef5054e"} Nov 25 18:35:09 crc kubenswrapper[4926]: I1125 18:35:09.102666 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.102641237 podStartE2EDuration="2.102641237s" podCreationTimestamp="2025-11-25 18:35:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:35:09.090159309 +0000 UTC m=+1339.475672944" watchObservedRunningTime="2025-11-25 18:35:09.102641237 +0000 UTC m=+1339.488154862" Nov 25 18:35:12 crc kubenswrapper[4926]: I1125 18:35:12.767685 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 18:35:13 crc kubenswrapper[4926]: I1125 18:35:13.397545 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 18:35:13 crc kubenswrapper[4926]: I1125 18:35:13.398046 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 18:35:14 crc kubenswrapper[4926]: I1125 18:35:14.415596 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9b3135e8-5795-4aaf-9c02-10dc929bb4a3" containerName="nova-metadata-log" 
probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 18:35:14 crc kubenswrapper[4926]: I1125 18:35:14.415847 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9b3135e8-5795-4aaf-9c02-10dc929bb4a3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 18:35:14 crc kubenswrapper[4926]: I1125 18:35:14.431430 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 18:35:14 crc kubenswrapper[4926]: I1125 18:35:14.431493 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 18:35:15 crc kubenswrapper[4926]: I1125 18:35:15.448480 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="da257cb5-92cf-405d-b671-ed2123802153" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 18:35:15 crc kubenswrapper[4926]: I1125 18:35:15.448478 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="da257cb5-92cf-405d-b671-ed2123802153" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 18:35:17 crc kubenswrapper[4926]: I1125 18:35:17.768067 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 18:35:17 crc kubenswrapper[4926]: I1125 18:35:17.805580 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 18:35:18 crc kubenswrapper[4926]: I1125 18:35:18.223175 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 18:35:23 crc kubenswrapper[4926]: I1125 18:35:23.245899 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 18:35:23 crc kubenswrapper[4926]: I1125 18:35:23.402617 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 18:35:23 crc kubenswrapper[4926]: I1125 18:35:23.402748 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 18:35:23 crc kubenswrapper[4926]: I1125 18:35:23.409280 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 18:35:23 crc kubenswrapper[4926]: I1125 18:35:23.409891 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 18:35:24 crc kubenswrapper[4926]: I1125 18:35:24.443558 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 18:35:24 crc kubenswrapper[4926]: I1125 18:35:24.444153 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 18:35:24 crc kubenswrapper[4926]: I1125 18:35:24.463477 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 18:35:24 crc kubenswrapper[4926]: I1125 18:35:24.472053 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-api-0" Nov 25 18:35:25 crc kubenswrapper[4926]: I1125 18:35:25.298935 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 18:35:25 crc kubenswrapper[4926]: I1125 18:35:25.308433 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 18:35:33 crc kubenswrapper[4926]: I1125 18:35:33.541799 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:35:33 crc kubenswrapper[4926]: I1125 18:35:33.543466 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:35:33 crc kubenswrapper[4926]: I1125 18:35:33.543612 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:35:33 crc kubenswrapper[4926]: I1125 18:35:33.544447 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a8f56314785fa968a3a105a23d5d2b50a67b5ca02a86eb00cd6083866de84208"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:35:33 crc kubenswrapper[4926]: I1125 18:35:33.544557 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://a8f56314785fa968a3a105a23d5d2b50a67b5ca02a86eb00cd6083866de84208" gracePeriod=600 Nov 25 18:35:34 crc kubenswrapper[4926]: I1125 18:35:34.415560 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="a8f56314785fa968a3a105a23d5d2b50a67b5ca02a86eb00cd6083866de84208" exitCode=0 Nov 25 18:35:34 crc kubenswrapper[4926]: I1125 18:35:34.415653 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"a8f56314785fa968a3a105a23d5d2b50a67b5ca02a86eb00cd6083866de84208"} Nov 25 18:35:34 crc kubenswrapper[4926]: I1125 18:35:34.416207 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2"} Nov 25 18:35:34 crc kubenswrapper[4926]: I1125 18:35:34.416244 4926 scope.go:117] "RemoveContainer" containerID="07a87485769ab3c18f3d0de8b8428276c4f53380d423cecc3238c93bfce01c6d" Nov 25 18:35:35 crc kubenswrapper[4926]: I1125 18:35:35.078702 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:35:36 crc kubenswrapper[4926]: I1125 18:35:36.129036 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:35:38 crc kubenswrapper[4926]: I1125 18:35:38.831469 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="rabbitmq" containerID="cri-o://0ffadf5eeb24a99e8f20e8a35f8dd76979e0e26d8eb368c5700951774a590eb2" gracePeriod=604797 Nov 25 18:35:39 crc kubenswrapper[4926]: I1125 18:35:39.496066 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="rabbitmq" containerID="cri-o://12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989" gracePeriod=604797 Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.501119 4926 generic.go:334] "Generic (PLEG): container finished" podID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerID="0ffadf5eeb24a99e8f20e8a35f8dd76979e0e26d8eb368c5700951774a590eb2" exitCode=0 Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.501198 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"38e40083-2404-4c67-88b5-41ccaf693c6e","Type":"ContainerDied","Data":"0ffadf5eeb24a99e8f20e8a35f8dd76979e0e26d8eb368c5700951774a590eb2"} Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.501486 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"38e40083-2404-4c67-88b5-41ccaf693c6e","Type":"ContainerDied","Data":"c536dea4f99cf17d00fc396bf472640854de9a24d33b3c83d3273913009aa3cd"} Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.501503 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c536dea4f99cf17d00fc396bf472640854de9a24d33b3c83d3273913009aa3cd" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.507338 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.631101 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-server-conf\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.631174 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-config-data\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.631301 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632130 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/38e40083-2404-4c67-88b5-41ccaf693c6e-pod-info\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632219 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndt9m\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-kube-api-access-ndt9m\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632257 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-plugins\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632284 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-erlang-cookie\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632325 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-confd\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632399 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-tls\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632426 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-plugins-conf\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: 
\"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.632461 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/38e40083-2404-4c67-88b5-41ccaf693c6e-erlang-cookie-secret\") pod \"38e40083-2404-4c67-88b5-41ccaf693c6e\" (UID: \"38e40083-2404-4c67-88b5-41ccaf693c6e\") " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.635106 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.637312 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.641242 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e40083-2404-4c67-88b5-41ccaf693c6e-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.643537 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/38e40083-2404-4c67-88b5-41ccaf693c6e-pod-info" (OuterVolumeSpecName: "pod-info") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.645420 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.648614 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.660647 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-kube-api-access-ndt9m" (OuterVolumeSpecName: "kube-api-access-ndt9m") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "kube-api-access-ndt9m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.667596 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.693502 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-config-data" (OuterVolumeSpecName: "config-data") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738124 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738164 4926 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738176 4926 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/38e40083-2404-4c67-88b5-41ccaf693c6e-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738188 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738216 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738228 4926 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/38e40083-2404-4c67-88b5-41ccaf693c6e-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738241 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndt9m\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-kube-api-access-ndt9m\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738253 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.738264 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.850856 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: 
"kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.864700 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-server-conf" (OuterVolumeSpecName: "server-conf") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.880595 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "38e40083-2404-4c67-88b5-41ccaf693c6e" (UID: "38e40083-2404-4c67-88b5-41ccaf693c6e"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.952116 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.952170 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/38e40083-2404-4c67-88b5-41ccaf693c6e-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:40 crc kubenswrapper[4926]: I1125 18:35:40.952183 4926 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/38e40083-2404-4c67-88b5-41ccaf693c6e-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.177267 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375147 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-server-conf\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375191 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375217 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8310425-a9bc-4c42-9caf-9c1a70041d2c-erlang-cookie-secret\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375262 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-tls\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375336 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-plugins\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375381 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-plugins-conf\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375470 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8310425-a9bc-4c42-9caf-9c1a70041d2c-pod-info\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375499 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-config-data\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375519 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-erlang-cookie\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375576 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-confd\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: 
\"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375622 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xb9r6\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-kube-api-access-xb9r6\") pod \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\" (UID: \"c8310425-a9bc-4c42-9caf-9c1a70041d2c\") " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.375785 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.376023 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.381295 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.381586 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.387548 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8310425-a9bc-4c42-9caf-9c1a70041d2c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.387839 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.387948 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c8310425-a9bc-4c42-9caf-9c1a70041d2c-pod-info" (OuterVolumeSpecName: "pod-info") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.387987 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.389087 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-kube-api-access-xb9r6" (OuterVolumeSpecName: "kube-api-access-xb9r6") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "kube-api-access-xb9r6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.417134 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-config-data" (OuterVolumeSpecName: "config-data") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.471353 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-server-conf" (OuterVolumeSpecName: "server-conf") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.478824 4926 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8310425-a9bc-4c42-9caf-9c1a70041d2c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.478878 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480113 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480157 4926 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480170 4926 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8310425-a9bc-4c42-9caf-9c1a70041d2c-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480182 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480194 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" 
(UniqueName: \"kubernetes.io/empty-dir/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480208 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xb9r6\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-kube-api-access-xb9r6\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.480223 4926 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8310425-a9bc-4c42-9caf-9c1a70041d2c-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.509712 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.524416 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerID="12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989" exitCode=0 Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.524514 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.527614 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.527856 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8310425-a9bc-4c42-9caf-9c1a70041d2c","Type":"ContainerDied","Data":"12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989"} Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.527896 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8310425-a9bc-4c42-9caf-9c1a70041d2c","Type":"ContainerDied","Data":"fe9c0e0519b8553cbc9abb2edfecb2d4efc4f4d113213ecb0d36a645ce8213eb"} Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.527919 4926 scope.go:117] "RemoveContainer" containerID="12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.539593 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c8310425-a9bc-4c42-9caf-9c1a70041d2c" (UID: "c8310425-a9bc-4c42-9caf-9c1a70041d2c"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.566160 4926 scope.go:117] "RemoveContainer" containerID="4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.568926 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.578568 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.582700 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8310425-a9bc-4c42-9caf-9c1a70041d2c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.582732 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.588475 4926 scope.go:117] "RemoveContainer" containerID="12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989" Nov 25 18:35:41 crc kubenswrapper[4926]: E1125 18:35:41.588873 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989\": container with ID starting with 12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989 not found: ID does not exist" containerID="12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.588904 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989"} err="failed to get container status \"12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989\": rpc error: code = NotFound desc = could not find container \"12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989\": container with ID starting with 12410c3a32cceaa53b4d69d0f4f5ede3e5f084a6be8b8c75e427f98782818989 not found: ID does not exist" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.588934 4926 scope.go:117] "RemoveContainer" containerID="4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84" Nov 25 18:35:41 crc kubenswrapper[4926]: E1125 18:35:41.593404 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84\": container with ID starting with 4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84 not found: ID does not exist" containerID="4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.593470 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84"} err="failed to get container status \"4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84\": rpc error: code = NotFound desc = could not find container \"4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84\": container with ID starting with 4b1e089befb3bee2345512289429251ee14e8e4155f1bf797ba5441e61e31b84 not found: ID does not exist" Nov 25 18:35:41 crc 
kubenswrapper[4926]: I1125 18:35:41.598859 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 18:35:41 crc kubenswrapper[4926]: E1125 18:35:41.599304 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="setup-container"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.599327 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="setup-container"
Nov 25 18:35:41 crc kubenswrapper[4926]: E1125 18:35:41.599337 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="setup-container"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.599344 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="setup-container"
Nov 25 18:35:41 crc kubenswrapper[4926]: E1125 18:35:41.599384 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="rabbitmq"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.599390 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="rabbitmq"
Nov 25 18:35:41 crc kubenswrapper[4926]: E1125 18:35:41.599404 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="rabbitmq"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.599410 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="rabbitmq"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.599592 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" containerName="rabbitmq"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.599622 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" containerName="rabbitmq"
Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.600710 4926 util.go:30] "No sandbox for pod can be found. 
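
The cpu_manager and memory_manager "RemoveStaleState" entries above fire because the StatefulSet has recreated rabbitmq-server-0 and rabbitmq-cell1-server-0 under new pod UIDs while keeping the names: kubelet discards resource-manager state keyed by the old UIDs (38e40083-..., c8310425-...) before admitting the replacements. A hedged client-go sketch of how that name-stable, UID-changing churn could be observed (kubeconfig path is a placeholder):

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	w, err := cs.CoreV1().Pods("openstack").Watch(context.TODO(), metav1.ListOptions{
		FieldSelector: "metadata.name=rabbitmq-server-0",
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	// For the churn in this log the stream would show a DELETED event with
	// UID 38e40083-... followed by an ADDED event with UID c9470d78-....
	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*corev1.Pod); ok {
			fmt.Println(ev.Type, pod.Name, pod.UID)
		}
	}
}
```
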
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.610810 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.611090 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.611151 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.611321 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.611109 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.612547 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-rr2qd" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.612722 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.622672 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.788662 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c9470d78-c381-4be0-b06e-13e3f97422ac-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.788746 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789021 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c9470d78-c381-4be0-b06e-13e3f97422ac-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789438 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789550 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789584 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789733 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789772 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789802 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-config-data\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.789828 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfxzc\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-kube-api-access-wfxzc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.870783 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.890248 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891344 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c9470d78-c381-4be0-b06e-13e3f97422ac-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891472 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891511 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c9470d78-c381-4be0-b06e-13e3f97422ac-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 
18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891569 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891596 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891612 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891629 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891650 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891675 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891691 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-config-data\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.891711 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfxzc\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-kube-api-access-wfxzc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.893332 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.893977 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-plugins-conf\") pod 
\"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.896944 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c9470d78-c381-4be0-b06e-13e3f97422ac-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.898526 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.898896 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c9470d78-c381-4be0-b06e-13e3f97422ac-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.899133 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.899845 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-config-data\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.901931 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c9470d78-c381-4be0-b06e-13e3f97422ac-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.904697 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.905635 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.914404 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfxzc\" (UniqueName: \"kubernetes.io/projected/c9470d78-c381-4be0-b06e-13e3f97422ac-kube-api-access-wfxzc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.930816 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:35:41 crc 
kubenswrapper[4926]: I1125 18:35:41.932686 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951127 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951406 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951535 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-vvtzv" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951553 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951648 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951729 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.951840 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 18:35:41 crc kubenswrapper[4926]: I1125 18:35:41.977518 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.007139 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c9470d78-c381-4be0-b06e-13e3f97422ac\") " pod="openstack/rabbitmq-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102435 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102540 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102557 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102594 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102626 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102646 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/249e603a-e4df-4b46-941d-ab40c5374c95-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102663 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102684 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102699 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102725 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gmb4\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-kube-api-access-6gmb4\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.102779 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/249e603a-e4df-4b46-941d-ab40c5374c95-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204093 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204140 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204193 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204229 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204248 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204264 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/249e603a-e4df-4b46-941d-ab40c5374c95-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204283 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204301 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204327 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gmb4\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-kube-api-access-6gmb4\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204408 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/249e603a-e4df-4b46-941d-ab40c5374c95-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204432 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.204779 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.205053 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.206275 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.206341 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.207389 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.211094 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/249e603a-e4df-4b46-941d-ab40c5374c95-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.211197 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.211777 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/249e603a-e4df-4b46-941d-ab40c5374c95-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.213336 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.220126 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.224728 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gmb4\" (UniqueName: \"kubernetes.io/projected/249e603a-e4df-4b46-941d-ab40c5374c95-kube-api-access-6gmb4\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.224859 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/249e603a-e4df-4b46-941d-ab40c5374c95-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.267595 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"249e603a-e4df-4b46-941d-ab40c5374c95\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.372511 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38e40083-2404-4c67-88b5-41ccaf693c6e" path="/var/lib/kubelet/pods/38e40083-2404-4c67-88b5-41ccaf693c6e/volumes" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.373602 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8310425-a9bc-4c42-9caf-9c1a70041d2c" path="/var/lib/kubelet/pods/c8310425-a9bc-4c42-9caf-9c1a70041d2c/volumes" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.567538 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:35:42 crc kubenswrapper[4926]: I1125 18:35:42.723637 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 18:35:43 crc kubenswrapper[4926]: I1125 18:35:43.059107 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 18:35:43 crc kubenswrapper[4926]: I1125 18:35:43.549271 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"249e603a-e4df-4b46-941d-ab40c5374c95","Type":"ContainerStarted","Data":"41c4bf27a57ca7d4bbe4d198bd75482593894bea3036c9dc2f8ad03b5fc33c26"} Nov 25 18:35:43 crc kubenswrapper[4926]: I1125 18:35:43.550357 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c9470d78-c381-4be0-b06e-13e3f97422ac","Type":"ContainerStarted","Data":"24d9e478adec58c3e602f08ce46664e602e151d9f0c45248851bbee559e1c5ec"} Nov 25 18:35:45 crc kubenswrapper[4926]: I1125 18:35:45.579980 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c9470d78-c381-4be0-b06e-13e3f97422ac","Type":"ContainerStarted","Data":"efc14dbfed79ced79fa41851fd54bfa3be28f09f2ed78c5211680daa9a967e6b"} Nov 25 18:35:46 crc kubenswrapper[4926]: I1125 18:35:46.619333 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"249e603a-e4df-4b46-941d-ab40c5374c95","Type":"ContainerStarted","Data":"5f2597c24d2c861b74b3cef01c37be36e5d85bc2b30612e2a91a841b53d5cf9c"} Nov 25 18:35:51 crc kubenswrapper[4926]: I1125 18:35:51.888317 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd46dfcd7-qrkdx"] Nov 25 18:35:51 crc kubenswrapper[4926]: I1125 18:35:51.893132 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:51 crc kubenswrapper[4926]: I1125 18:35:51.910324 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 18:35:51 crc kubenswrapper[4926]: I1125 18:35:51.920437 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd46dfcd7-qrkdx"] Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040245 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040313 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040340 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040405 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbtjr\" (UniqueName: \"kubernetes.io/projected/f69fd537-3678-4c91-b9c0-6d301dd474fa-kube-api-access-nbtjr\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040438 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-config\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040472 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.040516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-svc\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.141959 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" 
(UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.142025 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.142055 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.142103 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbtjr\" (UniqueName: \"kubernetes.io/projected/f69fd537-3678-4c91-b9c0-6d301dd474fa-kube-api-access-nbtjr\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.142135 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-config\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.142170 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.142205 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-svc\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.143283 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-svc\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.143984 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.144666 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " 
pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.145316 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.146492 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.147068 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-config\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.167596 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbtjr\" (UniqueName: \"kubernetes.io/projected/f69fd537-3678-4c91-b9c0-6d301dd474fa-kube-api-access-nbtjr\") pod \"dnsmasq-dns-6cd46dfcd7-qrkdx\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.224694 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:52 crc kubenswrapper[4926]: I1125 18:35:52.812064 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd46dfcd7-qrkdx"] Nov 25 18:35:52 crc kubenswrapper[4926]: W1125 18:35:52.824932 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf69fd537_3678_4c91_b9c0_6d301dd474fa.slice/crio-08214ed0cade526730f2876541cc082500610c88821470e94c33730faa9f4940 WatchSource:0}: Error finding container 08214ed0cade526730f2876541cc082500610c88821470e94c33730faa9f4940: Status 404 returned error can't find the container with id 08214ed0cade526730f2876541cc082500610c88821470e94c33730faa9f4940 Nov 25 18:35:53 crc kubenswrapper[4926]: I1125 18:35:53.688040 4926 generic.go:334] "Generic (PLEG): container finished" podID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerID="450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07" exitCode=0 Nov 25 18:35:53 crc kubenswrapper[4926]: I1125 18:35:53.688106 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" event={"ID":"f69fd537-3678-4c91-b9c0-6d301dd474fa","Type":"ContainerDied","Data":"450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07"} Nov 25 18:35:53 crc kubenswrapper[4926]: I1125 18:35:53.688137 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" event={"ID":"f69fd537-3678-4c91-b9c0-6d301dd474fa","Type":"ContainerStarted","Data":"08214ed0cade526730f2876541cc082500610c88821470e94c33730faa9f4940"} Nov 25 18:35:54 crc kubenswrapper[4926]: I1125 18:35:54.705797 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" 
event={"ID":"f69fd537-3678-4c91-b9c0-6d301dd474fa","Type":"ContainerStarted","Data":"e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572"} Nov 25 18:35:54 crc kubenswrapper[4926]: I1125 18:35:54.706276 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:35:54 crc kubenswrapper[4926]: I1125 18:35:54.751731 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" podStartSLOduration=3.751700872 podStartE2EDuration="3.751700872s" podCreationTimestamp="2025-11-25 18:35:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:35:54.734749711 +0000 UTC m=+1385.120263326" watchObservedRunningTime="2025-11-25 18:35:54.751700872 +0000 UTC m=+1385.137214507" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.226679 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.329896 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b7d54555f-9d6bc"] Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.330615 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" podUID="d25d769e-8387-460f-806f-065854877f2c" containerName="dnsmasq-dns" containerID="cri-o://b2237bb72ad9e3d708536c2e505799168709ae436342e22fa31c6853c52e1eb1" gracePeriod=10 Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.619757 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86b86f76c-cbrkb"] Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.622001 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.632512 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86b86f76c-cbrkb"] Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.709950 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-dns-svc\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.709995 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-ovsdbserver-nb\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.710051 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-ovsdbserver-sb\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.710074 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-dns-swift-storage-0\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.710127 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-config\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.710157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4fbh\" (UniqueName: \"kubernetes.io/projected/3abe3008-a4ac-4efe-b063-0234232afac3-kube-api-access-q4fbh\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.710190 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-openstack-edpm-ipam\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811583 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-dns-svc\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811629 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-ovsdbserver-nb\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811693 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-ovsdbserver-sb\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811724 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-dns-swift-storage-0\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811785 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-config\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4fbh\" (UniqueName: \"kubernetes.io/projected/3abe3008-a4ac-4efe-b063-0234232afac3-kube-api-access-q4fbh\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.811872 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-openstack-edpm-ipam\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.813440 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-openstack-edpm-ipam\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.813456 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-config\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.813730 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-dns-svc\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.813766 4926 generic.go:334] "Generic (PLEG): container finished" podID="d25d769e-8387-460f-806f-065854877f2c" 
containerID="b2237bb72ad9e3d708536c2e505799168709ae436342e22fa31c6853c52e1eb1" exitCode=0 Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.813776 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-ovsdbserver-nb\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.813800 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" event={"ID":"d25d769e-8387-460f-806f-065854877f2c","Type":"ContainerDied","Data":"b2237bb72ad9e3d708536c2e505799168709ae436342e22fa31c6853c52e1eb1"} Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.814683 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-dns-swift-storage-0\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.816634 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3abe3008-a4ac-4efe-b063-0234232afac3-ovsdbserver-sb\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.846423 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4fbh\" (UniqueName: \"kubernetes.io/projected/3abe3008-a4ac-4efe-b063-0234232afac3-kube-api-access-q4fbh\") pod \"dnsmasq-dns-86b86f76c-cbrkb\" (UID: \"3abe3008-a4ac-4efe-b063-0234232afac3\") " pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.940191 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:36:02 crc kubenswrapper[4926]: I1125 18:36:02.952958 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.016401 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-config\") pod \"d25d769e-8387-460f-806f-065854877f2c\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.016645 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-sb\") pod \"d25d769e-8387-460f-806f-065854877f2c\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.016735 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-nb\") pod \"d25d769e-8387-460f-806f-065854877f2c\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.016774 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glxvm\" (UniqueName: \"kubernetes.io/projected/d25d769e-8387-460f-806f-065854877f2c-kube-api-access-glxvm\") pod \"d25d769e-8387-460f-806f-065854877f2c\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.016839 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-swift-storage-0\") pod \"d25d769e-8387-460f-806f-065854877f2c\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.016890 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-svc\") pod \"d25d769e-8387-460f-806f-065854877f2c\" (UID: \"d25d769e-8387-460f-806f-065854877f2c\") " Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.021451 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d25d769e-8387-460f-806f-065854877f2c-kube-api-access-glxvm" (OuterVolumeSpecName: "kube-api-access-glxvm") pod "d25d769e-8387-460f-806f-065854877f2c" (UID: "d25d769e-8387-460f-806f-065854877f2c"). InnerVolumeSpecName "kube-api-access-glxvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.077960 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d25d769e-8387-460f-806f-065854877f2c" (UID: "d25d769e-8387-460f-806f-065854877f2c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.085289 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d25d769e-8387-460f-806f-065854877f2c" (UID: "d25d769e-8387-460f-806f-065854877f2c"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.086186 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d25d769e-8387-460f-806f-065854877f2c" (UID: "d25d769e-8387-460f-806f-065854877f2c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.103008 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d25d769e-8387-460f-806f-065854877f2c" (UID: "d25d769e-8387-460f-806f-065854877f2c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.109870 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-config" (OuterVolumeSpecName: "config") pod "d25d769e-8387-460f-806f-065854877f2c" (UID: "d25d769e-8387-460f-806f-065854877f2c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.118797 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.118825 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.118852 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glxvm\" (UniqueName: \"kubernetes.io/projected/d25d769e-8387-460f-806f-065854877f2c-kube-api-access-glxvm\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.118869 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.118882 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.118892 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d25d769e-8387-460f-806f-065854877f2c-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.527796 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86b86f76c-cbrkb"] Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.827213 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" event={"ID":"d25d769e-8387-460f-806f-065854877f2c","Type":"ContainerDied","Data":"305e3f03644a2ee91991fc9f0b83452e2d3d953b3414cdca595977c97759c2b9"} Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.827277 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b7d54555f-9d6bc" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.827699 4926 scope.go:117] "RemoveContainer" containerID="b2237bb72ad9e3d708536c2e505799168709ae436342e22fa31c6853c52e1eb1" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.831535 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" event={"ID":"3abe3008-a4ac-4efe-b063-0234232afac3","Type":"ContainerStarted","Data":"70a44e13ac1e90943ca1c51cacf16df2679f415d077c611de9a688381dd43976"} Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.867765 4926 scope.go:117] "RemoveContainer" containerID="0dbc577f20d1f69c6b3ea088bf40ab4cd1449fa1a59891b1c16cb158a4cdccf8" Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.875464 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b7d54555f-9d6bc"] Nov 25 18:36:03 crc kubenswrapper[4926]: I1125 18:36:03.885828 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b7d54555f-9d6bc"] Nov 25 18:36:04 crc kubenswrapper[4926]: I1125 18:36:04.340525 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d25d769e-8387-460f-806f-065854877f2c" path="/var/lib/kubelet/pods/d25d769e-8387-460f-806f-065854877f2c/volumes" Nov 25 18:36:04 crc kubenswrapper[4926]: I1125 18:36:04.844477 4926 generic.go:334] "Generic (PLEG): container finished" podID="3abe3008-a4ac-4efe-b063-0234232afac3" containerID="f34eeb9229e307ee4e9de1225226f4da1c72a7948b6ee849dae0e5be79dabc4e" exitCode=0 Nov 25 18:36:04 crc kubenswrapper[4926]: I1125 18:36:04.844643 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" event={"ID":"3abe3008-a4ac-4efe-b063-0234232afac3","Type":"ContainerDied","Data":"f34eeb9229e307ee4e9de1225226f4da1c72a7948b6ee849dae0e5be79dabc4e"} Nov 25 18:36:05 crc kubenswrapper[4926]: I1125 18:36:05.861748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" event={"ID":"3abe3008-a4ac-4efe-b063-0234232afac3","Type":"ContainerStarted","Data":"2c5d1d731164cfe5ff1e70da3f68d325f081fdf3df658aca69c7f808353b7a97"} Nov 25 18:36:05 crc kubenswrapper[4926]: I1125 18:36:05.862086 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:05 crc kubenswrapper[4926]: I1125 18:36:05.899553 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" podStartSLOduration=3.899526885 podStartE2EDuration="3.899526885s" podCreationTimestamp="2025-11-25 18:36:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:36:05.897177004 +0000 UTC m=+1396.282690649" watchObservedRunningTime="2025-11-25 18:36:05.899526885 +0000 UTC m=+1396.285040520" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.237300 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p7ljv"] Nov 25 18:36:07 crc kubenswrapper[4926]: E1125 18:36:07.238292 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d25d769e-8387-460f-806f-065854877f2c" containerName="dnsmasq-dns" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.238316 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d25d769e-8387-460f-806f-065854877f2c" containerName="dnsmasq-dns" Nov 25 18:36:07 crc kubenswrapper[4926]: E1125 
18:36:07.238334 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d25d769e-8387-460f-806f-065854877f2c" containerName="init" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.238347 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d25d769e-8387-460f-806f-065854877f2c" containerName="init" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.238793 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d25d769e-8387-460f-806f-065854877f2c" containerName="dnsmasq-dns" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.241603 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.250147 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7ljv"] Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.314005 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnw6z\" (UniqueName: \"kubernetes.io/projected/34213808-cec6-436d-8337-c04e52526839-kube-api-access-cnw6z\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.314352 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-catalog-content\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.314530 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-utilities\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.416985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnw6z\" (UniqueName: \"kubernetes.io/projected/34213808-cec6-436d-8337-c04e52526839-kube-api-access-cnw6z\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.417054 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-catalog-content\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.417190 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-utilities\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.417930 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-catalog-content\") pod 
\"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.418149 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-utilities\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.457322 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnw6z\" (UniqueName: \"kubernetes.io/projected/34213808-cec6-436d-8337-c04e52526839-kube-api-access-cnw6z\") pod \"redhat-operators-p7ljv\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:07 crc kubenswrapper[4926]: I1125 18:36:07.573959 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:08 crc kubenswrapper[4926]: W1125 18:36:08.112939 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34213808_cec6_436d_8337_c04e52526839.slice/crio-578f2b28582f1307f04cff0bd80c2e40c9f0cb5e88c056f6535c9ed54de95a60 WatchSource:0}: Error finding container 578f2b28582f1307f04cff0bd80c2e40c9f0cb5e88c056f6535c9ed54de95a60: Status 404 returned error can't find the container with id 578f2b28582f1307f04cff0bd80c2e40c9f0cb5e88c056f6535c9ed54de95a60 Nov 25 18:36:08 crc kubenswrapper[4926]: I1125 18:36:08.117599 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7ljv"] Nov 25 18:36:08 crc kubenswrapper[4926]: I1125 18:36:08.899269 4926 generic.go:334] "Generic (PLEG): container finished" podID="34213808-cec6-436d-8337-c04e52526839" containerID="988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648" exitCode=0 Nov 25 18:36:08 crc kubenswrapper[4926]: I1125 18:36:08.899338 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7ljv" event={"ID":"34213808-cec6-436d-8337-c04e52526839","Type":"ContainerDied","Data":"988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648"} Nov 25 18:36:08 crc kubenswrapper[4926]: I1125 18:36:08.899909 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7ljv" event={"ID":"34213808-cec6-436d-8337-c04e52526839","Type":"ContainerStarted","Data":"578f2b28582f1307f04cff0bd80c2e40c9f0cb5e88c056f6535c9ed54de95a60"} Nov 25 18:36:10 crc kubenswrapper[4926]: I1125 18:36:10.924306 4926 generic.go:334] "Generic (PLEG): container finished" podID="34213808-cec6-436d-8337-c04e52526839" containerID="570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e" exitCode=0 Nov 25 18:36:10 crc kubenswrapper[4926]: I1125 18:36:10.924438 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7ljv" event={"ID":"34213808-cec6-436d-8337-c04e52526839","Type":"ContainerDied","Data":"570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e"} Nov 25 18:36:11 crc kubenswrapper[4926]: I1125 18:36:11.946466 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7ljv" 
event={"ID":"34213808-cec6-436d-8337-c04e52526839","Type":"ContainerStarted","Data":"345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd"} Nov 25 18:36:11 crc kubenswrapper[4926]: I1125 18:36:11.976991 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p7ljv" podStartSLOduration=2.414821303 podStartE2EDuration="4.976969975s" podCreationTimestamp="2025-11-25 18:36:07 +0000 UTC" firstStartedPulling="2025-11-25 18:36:08.901821264 +0000 UTC m=+1399.287334909" lastFinishedPulling="2025-11-25 18:36:11.463969966 +0000 UTC m=+1401.849483581" observedRunningTime="2025-11-25 18:36:11.973413023 +0000 UTC m=+1402.358926638" watchObservedRunningTime="2025-11-25 18:36:11.976969975 +0000 UTC m=+1402.362483580" Nov 25 18:36:12 crc kubenswrapper[4926]: I1125 18:36:12.954694 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86b86f76c-cbrkb" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.038063 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cd46dfcd7-qrkdx"] Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.038306 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerName="dnsmasq-dns" containerID="cri-o://e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572" gracePeriod=10 Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.651751 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.695613 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-sb\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.695908 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbtjr\" (UniqueName: \"kubernetes.io/projected/f69fd537-3678-4c91-b9c0-6d301dd474fa-kube-api-access-nbtjr\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.696058 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-openstack-edpm-ipam\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.696090 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-nb\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.696168 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-svc\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.696289 4926 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-config\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.696697 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-swift-storage-0\") pod \"f69fd537-3678-4c91-b9c0-6d301dd474fa\" (UID: \"f69fd537-3678-4c91-b9c0-6d301dd474fa\") " Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.701994 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f69fd537-3678-4c91-b9c0-6d301dd474fa-kube-api-access-nbtjr" (OuterVolumeSpecName: "kube-api-access-nbtjr") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "kube-api-access-nbtjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.774863 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.798716 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.798746 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbtjr\" (UniqueName: \"kubernetes.io/projected/f69fd537-3678-4c91-b9c0-6d301dd474fa-kube-api-access-nbtjr\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.815750 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.816662 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.824769 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.824842 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-config" (OuterVolumeSpecName: "config") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.829121 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f69fd537-3678-4c91-b9c0-6d301dd474fa" (UID: "f69fd537-3678-4c91-b9c0-6d301dd474fa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.900993 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.901034 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.901050 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.901062 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-config\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.901075 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f69fd537-3678-4c91-b9c0-6d301dd474fa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.971932 4926 generic.go:334] "Generic (PLEG): container finished" podID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerID="e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572" exitCode=0 Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.971973 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" event={"ID":"f69fd537-3678-4c91-b9c0-6d301dd474fa","Type":"ContainerDied","Data":"e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572"} Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.971994 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.972022 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd46dfcd7-qrkdx" event={"ID":"f69fd537-3678-4c91-b9c0-6d301dd474fa","Type":"ContainerDied","Data":"08214ed0cade526730f2876541cc082500610c88821470e94c33730faa9f4940"} Nov 25 18:36:13 crc kubenswrapper[4926]: I1125 18:36:13.972046 4926 scope.go:117] "RemoveContainer" containerID="e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572" Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.010274 4926 scope.go:117] "RemoveContainer" containerID="450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07" Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.012963 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cd46dfcd7-qrkdx"] Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.029386 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cd46dfcd7-qrkdx"] Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.032596 4926 scope.go:117] "RemoveContainer" containerID="e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572" Nov 25 18:36:14 crc kubenswrapper[4926]: E1125 18:36:14.033139 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572\": container with ID starting with e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572 not found: ID does not exist" containerID="e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572" Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.033186 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572"} err="failed to get container status \"e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572\": rpc error: code = NotFound desc = could not find container \"e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572\": container with ID starting with e54bdf847cf0535f491790a794adbfd4febaf671b3882ccd29c82a7d2d7c7572 not found: ID does not exist" Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.033219 4926 scope.go:117] "RemoveContainer" containerID="450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07" Nov 25 18:36:14 crc kubenswrapper[4926]: E1125 18:36:14.033868 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07\": container with ID starting with 450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07 not found: ID does not exist" containerID="450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07" Nov 25 18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.033898 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07"} err="failed to get container status \"450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07\": rpc error: code = NotFound desc = could not find container \"450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07\": container with ID starting with 450d711fad2899858048eb7f759dfe1eca33b1a051dc91f472593ae1e5d4fb07 not found: ID does not exist" Nov 25 
18:36:14 crc kubenswrapper[4926]: I1125 18:36:14.343813 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" path="/var/lib/kubelet/pods/f69fd537-3678-4c91-b9c0-6d301dd474fa/volumes" Nov 25 18:36:17 crc kubenswrapper[4926]: I1125 18:36:17.574659 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:17 crc kubenswrapper[4926]: I1125 18:36:17.575100 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:17 crc kubenswrapper[4926]: I1125 18:36:17.665991 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:18 crc kubenswrapper[4926]: I1125 18:36:18.021706 4926 generic.go:334] "Generic (PLEG): container finished" podID="c9470d78-c381-4be0-b06e-13e3f97422ac" containerID="efc14dbfed79ced79fa41851fd54bfa3be28f09f2ed78c5211680daa9a967e6b" exitCode=0 Nov 25 18:36:18 crc kubenswrapper[4926]: I1125 18:36:18.021751 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c9470d78-c381-4be0-b06e-13e3f97422ac","Type":"ContainerDied","Data":"efc14dbfed79ced79fa41851fd54bfa3be28f09f2ed78c5211680daa9a967e6b"} Nov 25 18:36:18 crc kubenswrapper[4926]: I1125 18:36:18.085870 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:18 crc kubenswrapper[4926]: I1125 18:36:18.151553 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7ljv"] Nov 25 18:36:18 crc kubenswrapper[4926]: I1125 18:36:18.954230 4926 scope.go:117] "RemoveContainer" containerID="1140a4cb17c37bbcea4321661bee910d6f09a5df0dc77e80ce7b4b99b90a2dc5" Nov 25 18:36:18 crc kubenswrapper[4926]: I1125 18:36:18.977912 4926 scope.go:117] "RemoveContainer" containerID="7b5790e546b285a77b792d10763aefeb92245339fdec2cc6858f05ffbfc9ecca" Nov 25 18:36:19 crc kubenswrapper[4926]: I1125 18:36:19.039918 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c9470d78-c381-4be0-b06e-13e3f97422ac","Type":"ContainerStarted","Data":"1bbd17aeda583e2bcec268953f211c0fa98ed83d4d4b4bcba62c1a3153949223"} Nov 25 18:36:19 crc kubenswrapper[4926]: I1125 18:36:19.041182 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 18:36:19 crc kubenswrapper[4926]: I1125 18:36:19.045615 4926 generic.go:334] "Generic (PLEG): container finished" podID="249e603a-e4df-4b46-941d-ab40c5374c95" containerID="5f2597c24d2c861b74b3cef01c37be36e5d85bc2b30612e2a91a841b53d5cf9c" exitCode=0 Nov 25 18:36:19 crc kubenswrapper[4926]: I1125 18:36:19.046233 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"249e603a-e4df-4b46-941d-ab40c5374c95","Type":"ContainerDied","Data":"5f2597c24d2c861b74b3cef01c37be36e5d85bc2b30612e2a91a841b53d5cf9c"} Nov 25 18:36:19 crc kubenswrapper[4926]: I1125 18:36:19.081224 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.081203614 podStartE2EDuration="38.081203614s" podCreationTimestamp="2025-11-25 18:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 
18:36:19.07024474 +0000 UTC m=+1409.455758365" watchObservedRunningTime="2025-11-25 18:36:19.081203614 +0000 UTC m=+1409.466717229" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.062196 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"249e603a-e4df-4b46-941d-ab40c5374c95","Type":"ContainerStarted","Data":"099b04f61256c9f1e9d4341ec45fce5f433dbf345884c6c3288829f407157368"} Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.062745 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p7ljv" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="registry-server" containerID="cri-o://345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd" gracePeriod=2 Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.063145 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.114904 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.114875092 podStartE2EDuration="39.114875092s" podCreationTimestamp="2025-11-25 18:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:36:20.104053542 +0000 UTC m=+1410.489567177" watchObservedRunningTime="2025-11-25 18:36:20.114875092 +0000 UTC m=+1410.500388737" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.606134 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.644101 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-catalog-content\") pod \"34213808-cec6-436d-8337-c04e52526839\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.644178 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-utilities\") pod \"34213808-cec6-436d-8337-c04e52526839\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.644318 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnw6z\" (UniqueName: \"kubernetes.io/projected/34213808-cec6-436d-8337-c04e52526839-kube-api-access-cnw6z\") pod \"34213808-cec6-436d-8337-c04e52526839\" (UID: \"34213808-cec6-436d-8337-c04e52526839\") " Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.645709 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-utilities" (OuterVolumeSpecName: "utilities") pod "34213808-cec6-436d-8337-c04e52526839" (UID: "34213808-cec6-436d-8337-c04e52526839"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.668175 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34213808-cec6-436d-8337-c04e52526839-kube-api-access-cnw6z" (OuterVolumeSpecName: "kube-api-access-cnw6z") pod "34213808-cec6-436d-8337-c04e52526839" (UID: "34213808-cec6-436d-8337-c04e52526839"). InnerVolumeSpecName "kube-api-access-cnw6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.742966 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "34213808-cec6-436d-8337-c04e52526839" (UID: "34213808-cec6-436d-8337-c04e52526839"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.746462 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnw6z\" (UniqueName: \"kubernetes.io/projected/34213808-cec6-436d-8337-c04e52526839-kube-api-access-cnw6z\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.746498 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:20 crc kubenswrapper[4926]: I1125 18:36:20.746513 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34213808-cec6-436d-8337-c04e52526839-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.076563 4926 generic.go:334] "Generic (PLEG): container finished" podID="34213808-cec6-436d-8337-c04e52526839" containerID="345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd" exitCode=0 Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.076651 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p7ljv" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.076652 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7ljv" event={"ID":"34213808-cec6-436d-8337-c04e52526839","Type":"ContainerDied","Data":"345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd"} Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.076743 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7ljv" event={"ID":"34213808-cec6-436d-8337-c04e52526839","Type":"ContainerDied","Data":"578f2b28582f1307f04cff0bd80c2e40c9f0cb5e88c056f6535c9ed54de95a60"} Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.076775 4926 scope.go:117] "RemoveContainer" containerID="345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.115485 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7ljv"] Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.117058 4926 scope.go:117] "RemoveContainer" containerID="570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.124922 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p7ljv"] Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.143494 4926 scope.go:117] "RemoveContainer" containerID="988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.217561 4926 scope.go:117] "RemoveContainer" containerID="345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd" Nov 25 18:36:21 crc kubenswrapper[4926]: E1125 18:36:21.218153 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd\": container with ID starting with 345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd not found: ID does not exist" containerID="345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.218278 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd"} err="failed to get container status \"345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd\": rpc error: code = NotFound desc = could not find container \"345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd\": container with ID starting with 345b3dd5aee2657fab5a3641aca97c8b9b602c2b7b7a1292c5d420f2f5d391cd not found: ID does not exist" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.218358 4926 scope.go:117] "RemoveContainer" containerID="570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e" Nov 25 18:36:21 crc kubenswrapper[4926]: E1125 18:36:21.218852 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e\": container with ID starting with 570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e not found: ID does not exist" containerID="570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.218907 4926 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e"} err="failed to get container status \"570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e\": rpc error: code = NotFound desc = could not find container \"570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e\": container with ID starting with 570b97fb31b16c8542af19124ae92cd225ac5f511e38a297ec405f5f09ce378e not found: ID does not exist" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.218941 4926 scope.go:117] "RemoveContainer" containerID="988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648" Nov 25 18:36:21 crc kubenswrapper[4926]: E1125 18:36:21.219476 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648\": container with ID starting with 988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648 not found: ID does not exist" containerID="988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648" Nov 25 18:36:21 crc kubenswrapper[4926]: I1125 18:36:21.219543 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648"} err="failed to get container status \"988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648\": rpc error: code = NotFound desc = could not find container \"988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648\": container with ID starting with 988384236dbc8542eb343755edebaa8133d35e8b5f8b30eb1e5e13f2656a7648 not found: ID does not exist" Nov 25 18:36:22 crc kubenswrapper[4926]: I1125 18:36:22.347524 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34213808-cec6-436d-8337-c04e52526839" path="/var/lib/kubelet/pods/34213808-cec6-436d-8337-c04e52526839/volumes" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.205328 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f"] Nov 25 18:36:31 crc kubenswrapper[4926]: E1125 18:36:31.212836 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerName="dnsmasq-dns" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.212955 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerName="dnsmasq-dns" Nov 25 18:36:31 crc kubenswrapper[4926]: E1125 18:36:31.213046 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="extract-content" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.213108 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="extract-content" Nov 25 18:36:31 crc kubenswrapper[4926]: E1125 18:36:31.213187 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="registry-server" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.213256 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="registry-server" Nov 25 18:36:31 crc kubenswrapper[4926]: E1125 18:36:31.213335 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34213808-cec6-436d-8337-c04e52526839" 
containerName="extract-utilities" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.213420 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="extract-utilities" Nov 25 18:36:31 crc kubenswrapper[4926]: E1125 18:36:31.213529 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerName="init" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.213592 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerName="init" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.213875 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69fd537-3678-4c91-b9c0-6d301dd474fa" containerName="dnsmasq-dns" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.213958 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="34213808-cec6-436d-8337-c04e52526839" containerName="registry-server" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.214851 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.219451 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f"] Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.220743 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.220993 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.222387 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.222649 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.262304 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.262340 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.262384 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.262467 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqbzq\" (UniqueName: \"kubernetes.io/projected/06191797-281e-401d-ab9e-c394f6e5f19d-kube-api-access-hqbzq\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.364588 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqbzq\" (UniqueName: \"kubernetes.io/projected/06191797-281e-401d-ab9e-c394f6e5f19d-kube-api-access-hqbzq\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.364739 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.364762 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.364798 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.372441 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.373921 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.384019 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.388158 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hqbzq\" (UniqueName: \"kubernetes.io/projected/06191797-281e-401d-ab9e-c394f6e5f19d-kube-api-access-hqbzq\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:31 crc kubenswrapper[4926]: I1125 18:36:31.541263 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:32 crc kubenswrapper[4926]: I1125 18:36:32.100165 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f"] Nov 25 18:36:32 crc kubenswrapper[4926]: I1125 18:36:32.225029 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 18:36:32 crc kubenswrapper[4926]: I1125 18:36:32.231256 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" event={"ID":"06191797-281e-401d-ab9e-c394f6e5f19d","Type":"ContainerStarted","Data":"e37cbfa02e8e2559d882c1b103fef612a4933e804608b997f07280a59bfd89ee"} Nov 25 18:36:32 crc kubenswrapper[4926]: I1125 18:36:32.571640 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 18:36:41 crc kubenswrapper[4926]: I1125 18:36:41.322992 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" event={"ID":"06191797-281e-401d-ab9e-c394f6e5f19d","Type":"ContainerStarted","Data":"3bff636a8eab95d2c748d22f4067ad1bf6dc4565ea5b84fe6d4ba5c432d0d3e5"} Nov 25 18:36:41 crc kubenswrapper[4926]: I1125 18:36:41.347573 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" podStartSLOduration=1.43588516 podStartE2EDuration="10.347552742s" podCreationTimestamp="2025-11-25 18:36:31 +0000 UTC" firstStartedPulling="2025-11-25 18:36:32.113997836 +0000 UTC m=+1422.499511461" lastFinishedPulling="2025-11-25 18:36:41.025665448 +0000 UTC m=+1431.411179043" observedRunningTime="2025-11-25 18:36:41.345674773 +0000 UTC m=+1431.731188408" watchObservedRunningTime="2025-11-25 18:36:41.347552742 +0000 UTC m=+1431.733066357" Nov 25 18:36:53 crc kubenswrapper[4926]: I1125 18:36:53.501518 4926 generic.go:334] "Generic (PLEG): container finished" podID="06191797-281e-401d-ab9e-c394f6e5f19d" containerID="3bff636a8eab95d2c748d22f4067ad1bf6dc4565ea5b84fe6d4ba5c432d0d3e5" exitCode=0 Nov 25 18:36:53 crc kubenswrapper[4926]: I1125 18:36:53.501708 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" event={"ID":"06191797-281e-401d-ab9e-c394f6e5f19d","Type":"ContainerDied","Data":"3bff636a8eab95d2c748d22f4067ad1bf6dc4565ea5b84fe6d4ba5c432d0d3e5"} Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.650102 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jclpd"] Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.652936 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.672503 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jclpd"] Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.741882 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-catalog-content\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.742227 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-utilities\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.742407 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pl6p\" (UniqueName: \"kubernetes.io/projected/6e7a71f1-ac29-48c4-a53b-e03cc969549d-kube-api-access-6pl6p\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.845248 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-catalog-content\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.845428 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-utilities\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.845486 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pl6p\" (UniqueName: \"kubernetes.io/projected/6e7a71f1-ac29-48c4-a53b-e03cc969549d-kube-api-access-6pl6p\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.845930 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-catalog-content\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.846144 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-utilities\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.899154 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6pl6p\" (UniqueName: \"kubernetes.io/projected/6e7a71f1-ac29-48c4-a53b-e03cc969549d-kube-api-access-6pl6p\") pod \"certified-operators-jclpd\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:54 crc kubenswrapper[4926]: I1125 18:36:54.981291 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.090735 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.154031 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqbzq\" (UniqueName: \"kubernetes.io/projected/06191797-281e-401d-ab9e-c394f6e5f19d-kube-api-access-hqbzq\") pod \"06191797-281e-401d-ab9e-c394f6e5f19d\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.154149 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-repo-setup-combined-ca-bundle\") pod \"06191797-281e-401d-ab9e-c394f6e5f19d\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.154255 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-ssh-key\") pod \"06191797-281e-401d-ab9e-c394f6e5f19d\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.154355 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-inventory\") pod \"06191797-281e-401d-ab9e-c394f6e5f19d\" (UID: \"06191797-281e-401d-ab9e-c394f6e5f19d\") " Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.159991 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "06191797-281e-401d-ab9e-c394f6e5f19d" (UID: "06191797-281e-401d-ab9e-c394f6e5f19d"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.162863 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06191797-281e-401d-ab9e-c394f6e5f19d-kube-api-access-hqbzq" (OuterVolumeSpecName: "kube-api-access-hqbzq") pod "06191797-281e-401d-ab9e-c394f6e5f19d" (UID: "06191797-281e-401d-ab9e-c394f6e5f19d"). InnerVolumeSpecName "kube-api-access-hqbzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.192590 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "06191797-281e-401d-ab9e-c394f6e5f19d" (UID: "06191797-281e-401d-ab9e-c394f6e5f19d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.217332 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-inventory" (OuterVolumeSpecName: "inventory") pod "06191797-281e-401d-ab9e-c394f6e5f19d" (UID: "06191797-281e-401d-ab9e-c394f6e5f19d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.257110 4926 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.257150 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.257164 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06191797-281e-401d-ab9e-c394f6e5f19d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.257176 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqbzq\" (UniqueName: \"kubernetes.io/projected/06191797-281e-401d-ab9e-c394f6e5f19d-kube-api-access-hqbzq\") on node \"crc\" DevicePath \"\"" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.493481 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jclpd"] Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.528448 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.528452 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f" event={"ID":"06191797-281e-401d-ab9e-c394f6e5f19d","Type":"ContainerDied","Data":"e37cbfa02e8e2559d882c1b103fef612a4933e804608b997f07280a59bfd89ee"} Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.528575 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e37cbfa02e8e2559d882c1b103fef612a4933e804608b997f07280a59bfd89ee" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.529615 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerStarted","Data":"bf78ebe20a015620c97c55c376de2a50852c314b6483f68f929b7b529cb62e99"} Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.602235 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm"] Nov 25 18:36:55 crc kubenswrapper[4926]: E1125 18:36:55.603416 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06191797-281e-401d-ab9e-c394f6e5f19d" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.603518 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="06191797-281e-401d-ab9e-c394f6e5f19d" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.603788 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="06191797-281e-401d-ab9e-c394f6e5f19d" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.604904 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.609312 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.609317 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.610571 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.612976 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.618116 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm"] Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.664082 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.664194 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.664243 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qpph\" (UniqueName: \"kubernetes.io/projected/a2a96977-a5be-4222-86bf-7caf90e17f8d-kube-api-access-2qpph\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.766003 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.766070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qpph\" (UniqueName: \"kubernetes.io/projected/a2a96977-a5be-4222-86bf-7caf90e17f8d-kube-api-access-2qpph\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.766178 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.771092 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.774863 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.783360 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qpph\" (UniqueName: \"kubernetes.io/projected/a2a96977-a5be-4222-86bf-7caf90e17f8d-kube-api-access-2qpph\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fh5fm\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:55 crc kubenswrapper[4926]: I1125 18:36:55.923903 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:36:56 crc kubenswrapper[4926]: I1125 18:36:56.544284 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerID="821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2" exitCode=0 Nov 25 18:36:56 crc kubenswrapper[4926]: I1125 18:36:56.544455 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerDied","Data":"821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2"} Nov 25 18:36:56 crc kubenswrapper[4926]: I1125 18:36:56.553836 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm"] Nov 25 18:36:56 crc kubenswrapper[4926]: W1125 18:36:56.561870 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2a96977_a5be_4222_86bf_7caf90e17f8d.slice/crio-3e4db849e51bc04bc6934879052a3c26ed376c5b3d8d2db17fd7a5a212c5cfa5 WatchSource:0}: Error finding container 3e4db849e51bc04bc6934879052a3c26ed376c5b3d8d2db17fd7a5a212c5cfa5: Status 404 returned error can't find the container with id 3e4db849e51bc04bc6934879052a3c26ed376c5b3d8d2db17fd7a5a212c5cfa5 Nov 25 18:36:57 crc kubenswrapper[4926]: I1125 18:36:57.570188 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" event={"ID":"a2a96977-a5be-4222-86bf-7caf90e17f8d","Type":"ContainerStarted","Data":"2e2089450cbfe79e4edb1e2e4798c6e612c4565a17581000e4e12a5ab8c0bc2c"} Nov 25 18:36:57 crc kubenswrapper[4926]: I1125 18:36:57.570762 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" event={"ID":"a2a96977-a5be-4222-86bf-7caf90e17f8d","Type":"ContainerStarted","Data":"3e4db849e51bc04bc6934879052a3c26ed376c5b3d8d2db17fd7a5a212c5cfa5"} Nov 25 18:36:57 crc kubenswrapper[4926]: I1125 18:36:57.575307 
4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerStarted","Data":"699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac"} Nov 25 18:36:57 crc kubenswrapper[4926]: I1125 18:36:57.599321 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" podStartSLOduration=2.186630879 podStartE2EDuration="2.599298241s" podCreationTimestamp="2025-11-25 18:36:55 +0000 UTC" firstStartedPulling="2025-11-25 18:36:56.567498992 +0000 UTC m=+1446.953012597" lastFinishedPulling="2025-11-25 18:36:56.980166324 +0000 UTC m=+1447.365679959" observedRunningTime="2025-11-25 18:36:57.589179307 +0000 UTC m=+1447.974692912" watchObservedRunningTime="2025-11-25 18:36:57.599298241 +0000 UTC m=+1447.984811846" Nov 25 18:36:58 crc kubenswrapper[4926]: I1125 18:36:58.599742 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerID="699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac" exitCode=0 Nov 25 18:36:58 crc kubenswrapper[4926]: I1125 18:36:58.600679 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerDied","Data":"699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac"} Nov 25 18:36:59 crc kubenswrapper[4926]: I1125 18:36:59.614233 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerStarted","Data":"f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2"} Nov 25 18:36:59 crc kubenswrapper[4926]: I1125 18:36:59.635863 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jclpd" podStartSLOduration=3.162930482 podStartE2EDuration="5.635838426s" podCreationTimestamp="2025-11-25 18:36:54 +0000 UTC" firstStartedPulling="2025-11-25 18:36:56.546887935 +0000 UTC m=+1446.932401550" lastFinishedPulling="2025-11-25 18:36:59.019795849 +0000 UTC m=+1449.405309494" observedRunningTime="2025-11-25 18:36:59.630161698 +0000 UTC m=+1450.015675314" watchObservedRunningTime="2025-11-25 18:36:59.635838426 +0000 UTC m=+1450.021352031" Nov 25 18:37:00 crc kubenswrapper[4926]: I1125 18:37:00.629288 4926 generic.go:334] "Generic (PLEG): container finished" podID="a2a96977-a5be-4222-86bf-7caf90e17f8d" containerID="2e2089450cbfe79e4edb1e2e4798c6e612c4565a17581000e4e12a5ab8c0bc2c" exitCode=0 Nov 25 18:37:00 crc kubenswrapper[4926]: I1125 18:37:00.629438 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" event={"ID":"a2a96977-a5be-4222-86bf-7caf90e17f8d","Type":"ContainerDied","Data":"2e2089450cbfe79e4edb1e2e4798c6e612c4565a17581000e4e12a5ab8c0bc2c"} Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.173525 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.285353 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-ssh-key\") pod \"a2a96977-a5be-4222-86bf-7caf90e17f8d\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.285804 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-inventory\") pod \"a2a96977-a5be-4222-86bf-7caf90e17f8d\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.285847 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qpph\" (UniqueName: \"kubernetes.io/projected/a2a96977-a5be-4222-86bf-7caf90e17f8d-kube-api-access-2qpph\") pod \"a2a96977-a5be-4222-86bf-7caf90e17f8d\" (UID: \"a2a96977-a5be-4222-86bf-7caf90e17f8d\") " Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.291254 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2a96977-a5be-4222-86bf-7caf90e17f8d-kube-api-access-2qpph" (OuterVolumeSpecName: "kube-api-access-2qpph") pod "a2a96977-a5be-4222-86bf-7caf90e17f8d" (UID: "a2a96977-a5be-4222-86bf-7caf90e17f8d"). InnerVolumeSpecName "kube-api-access-2qpph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.314684 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-inventory" (OuterVolumeSpecName: "inventory") pod "a2a96977-a5be-4222-86bf-7caf90e17f8d" (UID: "a2a96977-a5be-4222-86bf-7caf90e17f8d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.317357 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a2a96977-a5be-4222-86bf-7caf90e17f8d" (UID: "a2a96977-a5be-4222-86bf-7caf90e17f8d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.388082 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.388110 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2a96977-a5be-4222-86bf-7caf90e17f8d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.388123 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qpph\" (UniqueName: \"kubernetes.io/projected/a2a96977-a5be-4222-86bf-7caf90e17f8d-kube-api-access-2qpph\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.651496 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" event={"ID":"a2a96977-a5be-4222-86bf-7caf90e17f8d","Type":"ContainerDied","Data":"3e4db849e51bc04bc6934879052a3c26ed376c5b3d8d2db17fd7a5a212c5cfa5"} Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.651816 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e4db849e51bc04bc6934879052a3c26ed376c5b3d8d2db17fd7a5a212c5cfa5" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.651950 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fh5fm" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.726626 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9"] Nov 25 18:37:02 crc kubenswrapper[4926]: E1125 18:37:02.727031 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2a96977-a5be-4222-86bf-7caf90e17f8d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.727047 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2a96977-a5be-4222-86bf-7caf90e17f8d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.727241 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2a96977-a5be-4222-86bf-7caf90e17f8d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.727890 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.730274 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.730442 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.730496 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.741799 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9"] Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.783587 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.895424 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.895555 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdf8r\" (UniqueName: \"kubernetes.io/projected/c38a7543-0881-45e4-b1b3-1c515379526a-kube-api-access-vdf8r\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.895581 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.895622 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.997944 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.998058 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdf8r\" (UniqueName: \"kubernetes.io/projected/c38a7543-0881-45e4-b1b3-1c515379526a-kube-api-access-vdf8r\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.998090 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:02 crc kubenswrapper[4926]: I1125 18:37:02.998129 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:03 crc kubenswrapper[4926]: I1125 18:37:03.004326 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:03 crc kubenswrapper[4926]: I1125 18:37:03.005327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:03 crc kubenswrapper[4926]: I1125 18:37:03.005791 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:03 crc kubenswrapper[4926]: I1125 18:37:03.028005 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdf8r\" (UniqueName: \"kubernetes.io/projected/c38a7543-0881-45e4-b1b3-1c515379526a-kube-api-access-vdf8r\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:03 crc kubenswrapper[4926]: I1125 18:37:03.093317 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:37:03 crc kubenswrapper[4926]: I1125 18:37:03.742494 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9"] Nov 25 18:37:04 crc kubenswrapper[4926]: I1125 18:37:04.675799 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" event={"ID":"c38a7543-0881-45e4-b1b3-1c515379526a","Type":"ContainerStarted","Data":"1409c45bcbf4380f04674ec0476b93e87ef8b18e6cfb4bff4c53f6cbc0ec8507"} Nov 25 18:37:04 crc kubenswrapper[4926]: I1125 18:37:04.676166 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" event={"ID":"c38a7543-0881-45e4-b1b3-1c515379526a","Type":"ContainerStarted","Data":"13056d66996669334205b29d51eee432f0fda3eafbe975211c0b19997411e296"} Nov 25 18:37:04 crc kubenswrapper[4926]: I1125 18:37:04.712005 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" podStartSLOduration=2.224328268 podStartE2EDuration="2.711977909s" podCreationTimestamp="2025-11-25 18:37:02 +0000 UTC" firstStartedPulling="2025-11-25 18:37:03.752743576 +0000 UTC m=+1454.138257171" lastFinishedPulling="2025-11-25 18:37:04.240393197 +0000 UTC m=+1454.625906812" observedRunningTime="2025-11-25 18:37:04.693086018 +0000 UTC m=+1455.078599663" watchObservedRunningTime="2025-11-25 18:37:04.711977909 +0000 UTC m=+1455.097491534" Nov 25 18:37:04 crc kubenswrapper[4926]: I1125 18:37:04.981728 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:37:04 crc kubenswrapper[4926]: I1125 18:37:04.981823 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:37:05 crc kubenswrapper[4926]: I1125 18:37:05.029581 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:37:05 crc kubenswrapper[4926]: I1125 18:37:05.766360 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:37:05 crc kubenswrapper[4926]: I1125 18:37:05.831061 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jclpd"] Nov 25 18:37:07 crc kubenswrapper[4926]: I1125 18:37:07.720534 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jclpd" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="registry-server" containerID="cri-o://f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2" gracePeriod=2 Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.231051 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.416794 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-utilities\") pod \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.416871 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pl6p\" (UniqueName: \"kubernetes.io/projected/6e7a71f1-ac29-48c4-a53b-e03cc969549d-kube-api-access-6pl6p\") pod \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.417000 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-catalog-content\") pod \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\" (UID: \"6e7a71f1-ac29-48c4-a53b-e03cc969549d\") " Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.418259 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-utilities" (OuterVolumeSpecName: "utilities") pod "6e7a71f1-ac29-48c4-a53b-e03cc969549d" (UID: "6e7a71f1-ac29-48c4-a53b-e03cc969549d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.423151 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e7a71f1-ac29-48c4-a53b-e03cc969549d-kube-api-access-6pl6p" (OuterVolumeSpecName: "kube-api-access-6pl6p") pod "6e7a71f1-ac29-48c4-a53b-e03cc969549d" (UID: "6e7a71f1-ac29-48c4-a53b-e03cc969549d"). InnerVolumeSpecName "kube-api-access-6pl6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.472150 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e7a71f1-ac29-48c4-a53b-e03cc969549d" (UID: "6e7a71f1-ac29-48c4-a53b-e03cc969549d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.520670 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.521418 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pl6p\" (UniqueName: \"kubernetes.io/projected/6e7a71f1-ac29-48c4-a53b-e03cc969549d-kube-api-access-6pl6p\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.521448 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7a71f1-ac29-48c4-a53b-e03cc969549d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.737649 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerID="f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2" exitCode=0 Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.737701 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerDied","Data":"f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2"} Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.737732 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jclpd" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.737755 4926 scope.go:117] "RemoveContainer" containerID="f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.737743 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jclpd" event={"ID":"6e7a71f1-ac29-48c4-a53b-e03cc969549d","Type":"ContainerDied","Data":"bf78ebe20a015620c97c55c376de2a50852c314b6483f68f929b7b529cb62e99"} Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.770115 4926 scope.go:117] "RemoveContainer" containerID="699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.818169 4926 scope.go:117] "RemoveContainer" containerID="821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.826594 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jclpd"] Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.845650 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jclpd"] Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.875484 4926 scope.go:117] "RemoveContainer" containerID="f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2" Nov 25 18:37:08 crc kubenswrapper[4926]: E1125 18:37:08.876302 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2\": container with ID starting with f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2 not found: ID does not exist" containerID="f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.876440 
4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2"} err="failed to get container status \"f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2\": rpc error: code = NotFound desc = could not find container \"f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2\": container with ID starting with f99134632e29602e48659c1699df58e7dd9dbd8bcc49237135921c42a25db2f2 not found: ID does not exist" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.876570 4926 scope.go:117] "RemoveContainer" containerID="699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac" Nov 25 18:37:08 crc kubenswrapper[4926]: E1125 18:37:08.877085 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac\": container with ID starting with 699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac not found: ID does not exist" containerID="699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.877125 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac"} err="failed to get container status \"699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac\": rpc error: code = NotFound desc = could not find container \"699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac\": container with ID starting with 699a53cf6a9ce448f29e48163abec7ce2da0521a32942b3c8c0e18863af63aac not found: ID does not exist" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.877147 4926 scope.go:117] "RemoveContainer" containerID="821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2" Nov 25 18:37:08 crc kubenswrapper[4926]: E1125 18:37:08.877447 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2\": container with ID starting with 821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2 not found: ID does not exist" containerID="821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2" Nov 25 18:37:08 crc kubenswrapper[4926]: I1125 18:37:08.877532 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2"} err="failed to get container status \"821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2\": rpc error: code = NotFound desc = could not find container \"821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2\": container with ID starting with 821641488cd90d5e9807bd18d04956d5cdb586f7a9aaf5a9d07dfe5af730add2 not found: ID does not exist" Nov 25 18:37:09 crc kubenswrapper[4926]: E1125 18:37:09.005089 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e7a71f1_ac29_48c4_a53b_e03cc969549d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e7a71f1_ac29_48c4_a53b_e03cc969549d.slice/crio-bf78ebe20a015620c97c55c376de2a50852c314b6483f68f929b7b529cb62e99\": RecentStats: unable to find 
data in memory cache]" Nov 25 18:37:10 crc kubenswrapper[4926]: I1125 18:37:10.339800 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" path="/var/lib/kubelet/pods/6e7a71f1-ac29-48c4-a53b-e03cc969549d/volumes" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.409293 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h9225"] Nov 25 18:37:18 crc kubenswrapper[4926]: E1125 18:37:18.410311 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="registry-server" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.410325 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="registry-server" Nov 25 18:37:18 crc kubenswrapper[4926]: E1125 18:37:18.410388 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="extract-utilities" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.410397 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="extract-utilities" Nov 25 18:37:18 crc kubenswrapper[4926]: E1125 18:37:18.410422 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="extract-content" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.410430 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="extract-content" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.410642 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e7a71f1-ac29-48c4-a53b-e03cc969549d" containerName="registry-server" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.412076 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.427534 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h9225"] Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.460681 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-catalog-content\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.461004 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-utilities\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.461207 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szwkk\" (UniqueName: \"kubernetes.io/projected/2985bb1b-465d-4fbe-9a08-5442f0d02190-kube-api-access-szwkk\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.563522 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-utilities\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.563605 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szwkk\" (UniqueName: \"kubernetes.io/projected/2985bb1b-465d-4fbe-9a08-5442f0d02190-kube-api-access-szwkk\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.563683 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-catalog-content\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.564145 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-utilities\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.564165 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-catalog-content\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.582693 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-szwkk\" (UniqueName: \"kubernetes.io/projected/2985bb1b-465d-4fbe-9a08-5442f0d02190-kube-api-access-szwkk\") pod \"community-operators-h9225\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:18 crc kubenswrapper[4926]: I1125 18:37:18.761099 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:19 crc kubenswrapper[4926]: I1125 18:37:19.186857 4926 scope.go:117] "RemoveContainer" containerID="0ffadf5eeb24a99e8f20e8a35f8dd76979e0e26d8eb368c5700951774a590eb2" Nov 25 18:37:19 crc kubenswrapper[4926]: I1125 18:37:19.189972 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h9225"] Nov 25 18:37:19 crc kubenswrapper[4926]: W1125 18:37:19.196601 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2985bb1b_465d_4fbe_9a08_5442f0d02190.slice/crio-bef8c41a740823c3e43e7b92dc42342e7483c4c3adec2e240415be77e2f52aef WatchSource:0}: Error finding container bef8c41a740823c3e43e7b92dc42342e7483c4c3adec2e240415be77e2f52aef: Status 404 returned error can't find the container with id bef8c41a740823c3e43e7b92dc42342e7483c4c3adec2e240415be77e2f52aef Nov 25 18:37:19 crc kubenswrapper[4926]: I1125 18:37:19.255775 4926 scope.go:117] "RemoveContainer" containerID="625b53ae16a8f68d2bd7f99606e5fb62e2bbbf4051f296daeca969a2afcb0148" Nov 25 18:37:19 crc kubenswrapper[4926]: I1125 18:37:19.874334 4926 generic.go:334] "Generic (PLEG): container finished" podID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerID="11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb" exitCode=0 Nov 25 18:37:19 crc kubenswrapper[4926]: I1125 18:37:19.874399 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerDied","Data":"11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb"} Nov 25 18:37:19 crc kubenswrapper[4926]: I1125 18:37:19.874925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerStarted","Data":"bef8c41a740823c3e43e7b92dc42342e7483c4c3adec2e240415be77e2f52aef"} Nov 25 18:37:20 crc kubenswrapper[4926]: I1125 18:37:20.887100 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerStarted","Data":"617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5"} Nov 25 18:37:21 crc kubenswrapper[4926]: I1125 18:37:21.904141 4926 generic.go:334] "Generic (PLEG): container finished" podID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerID="617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5" exitCode=0 Nov 25 18:37:21 crc kubenswrapper[4926]: I1125 18:37:21.904209 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerDied","Data":"617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5"} Nov 25 18:37:22 crc kubenswrapper[4926]: I1125 18:37:22.919515 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" 
event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerStarted","Data":"131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd"} Nov 25 18:37:22 crc kubenswrapper[4926]: I1125 18:37:22.946885 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h9225" podStartSLOduration=2.441863828 podStartE2EDuration="4.946854596s" podCreationTimestamp="2025-11-25 18:37:18 +0000 UTC" firstStartedPulling="2025-11-25 18:37:19.876983671 +0000 UTC m=+1470.262497276" lastFinishedPulling="2025-11-25 18:37:22.381974399 +0000 UTC m=+1472.767488044" observedRunningTime="2025-11-25 18:37:22.937877383 +0000 UTC m=+1473.323391008" watchObservedRunningTime="2025-11-25 18:37:22.946854596 +0000 UTC m=+1473.332368221" Nov 25 18:37:28 crc kubenswrapper[4926]: I1125 18:37:28.761866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:28 crc kubenswrapper[4926]: I1125 18:37:28.763908 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:28 crc kubenswrapper[4926]: I1125 18:37:28.835298 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:29 crc kubenswrapper[4926]: I1125 18:37:29.100790 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:29 crc kubenswrapper[4926]: I1125 18:37:29.179676 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h9225"] Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.035998 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h9225" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="registry-server" containerID="cri-o://131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd" gracePeriod=2 Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.597909 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.659592 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szwkk\" (UniqueName: \"kubernetes.io/projected/2985bb1b-465d-4fbe-9a08-5442f0d02190-kube-api-access-szwkk\") pod \"2985bb1b-465d-4fbe-9a08-5442f0d02190\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.659804 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-utilities\") pod \"2985bb1b-465d-4fbe-9a08-5442f0d02190\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.659924 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-catalog-content\") pod \"2985bb1b-465d-4fbe-9a08-5442f0d02190\" (UID: \"2985bb1b-465d-4fbe-9a08-5442f0d02190\") " Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.660801 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-utilities" (OuterVolumeSpecName: "utilities") pod "2985bb1b-465d-4fbe-9a08-5442f0d02190" (UID: "2985bb1b-465d-4fbe-9a08-5442f0d02190"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.665169 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2985bb1b-465d-4fbe-9a08-5442f0d02190-kube-api-access-szwkk" (OuterVolumeSpecName: "kube-api-access-szwkk") pod "2985bb1b-465d-4fbe-9a08-5442f0d02190" (UID: "2985bb1b-465d-4fbe-9a08-5442f0d02190"). InnerVolumeSpecName "kube-api-access-szwkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.714292 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2985bb1b-465d-4fbe-9a08-5442f0d02190" (UID: "2985bb1b-465d-4fbe-9a08-5442f0d02190"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.762462 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szwkk\" (UniqueName: \"kubernetes.io/projected/2985bb1b-465d-4fbe-9a08-5442f0d02190-kube-api-access-szwkk\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.762803 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:31 crc kubenswrapper[4926]: I1125 18:37:31.762812 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985bb1b-465d-4fbe-9a08-5442f0d02190-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.052564 4926 generic.go:334] "Generic (PLEG): container finished" podID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerID="131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd" exitCode=0 Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.052634 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerDied","Data":"131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd"} Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.052675 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h9225" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.052703 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h9225" event={"ID":"2985bb1b-465d-4fbe-9a08-5442f0d02190","Type":"ContainerDied","Data":"bef8c41a740823c3e43e7b92dc42342e7483c4c3adec2e240415be77e2f52aef"} Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.052729 4926 scope.go:117] "RemoveContainer" containerID="131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.112126 4926 scope.go:117] "RemoveContainer" containerID="617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.114288 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h9225"] Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.124556 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h9225"] Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.147426 4926 scope.go:117] "RemoveContainer" containerID="11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.203959 4926 scope.go:117] "RemoveContainer" containerID="131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd" Nov 25 18:37:32 crc kubenswrapper[4926]: E1125 18:37:32.204683 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd\": container with ID starting with 131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd not found: ID does not exist" containerID="131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.204719 
4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd"} err="failed to get container status \"131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd\": rpc error: code = NotFound desc = could not find container \"131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd\": container with ID starting with 131df436d9e66a0f388f38eb54023de4e6d872cb0e0574fad9a8594df20e19cd not found: ID does not exist" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.204746 4926 scope.go:117] "RemoveContainer" containerID="617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5" Nov 25 18:37:32 crc kubenswrapper[4926]: E1125 18:37:32.205301 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5\": container with ID starting with 617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5 not found: ID does not exist" containerID="617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.205350 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5"} err="failed to get container status \"617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5\": rpc error: code = NotFound desc = could not find container \"617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5\": container with ID starting with 617fd979002444e051170ff05083f446f0ec335c66feb73efc400d55718335b5 not found: ID does not exist" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.205397 4926 scope.go:117] "RemoveContainer" containerID="11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb" Nov 25 18:37:32 crc kubenswrapper[4926]: E1125 18:37:32.205779 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb\": container with ID starting with 11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb not found: ID does not exist" containerID="11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.205803 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb"} err="failed to get container status \"11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb\": rpc error: code = NotFound desc = could not find container \"11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb\": container with ID starting with 11441e4c7681e77efeb94a1615bb2c9f9dc82557ea34695ccdcb12cee115b3cb not found: ID does not exist" Nov 25 18:37:32 crc kubenswrapper[4926]: I1125 18:37:32.347880 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" path="/var/lib/kubelet/pods/2985bb1b-465d-4fbe-9a08-5442f0d02190/volumes" Nov 25 18:37:33 crc kubenswrapper[4926]: I1125 18:37:33.541125 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:37:33 crc kubenswrapper[4926]: I1125 18:37:33.541501 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.126326 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l54lw"] Nov 25 18:37:36 crc kubenswrapper[4926]: E1125 18:37:36.127904 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="registry-server" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.127955 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="registry-server" Nov 25 18:37:36 crc kubenswrapper[4926]: E1125 18:37:36.128043 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="extract-utilities" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.128063 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="extract-utilities" Nov 25 18:37:36 crc kubenswrapper[4926]: E1125 18:37:36.128130 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="extract-content" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.128359 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="extract-content" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.128924 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="2985bb1b-465d-4fbe-9a08-5442f0d02190" containerName="registry-server" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.132516 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.143444 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l54lw"] Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.256957 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-utilities\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.257170 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-catalog-content\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.257493 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwplq\" (UniqueName: \"kubernetes.io/projected/38d725d1-1001-42c4-9bba-202e0aa4d953-kube-api-access-wwplq\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.359738 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwplq\" (UniqueName: \"kubernetes.io/projected/38d725d1-1001-42c4-9bba-202e0aa4d953-kube-api-access-wwplq\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.359792 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-utilities\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.359921 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-catalog-content\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.360807 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-catalog-content\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.360821 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-utilities\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.381283 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wwplq\" (UniqueName: \"kubernetes.io/projected/38d725d1-1001-42c4-9bba-202e0aa4d953-kube-api-access-wwplq\") pod \"redhat-marketplace-l54lw\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.471345 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:36 crc kubenswrapper[4926]: I1125 18:37:36.963426 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l54lw"] Nov 25 18:37:36 crc kubenswrapper[4926]: W1125 18:37:36.973567 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38d725d1_1001_42c4_9bba_202e0aa4d953.slice/crio-25e337c89ae812b0ca1a179f3627fa7dceba1fad81b79c751fcba5d853427967 WatchSource:0}: Error finding container 25e337c89ae812b0ca1a179f3627fa7dceba1fad81b79c751fcba5d853427967: Status 404 returned error can't find the container with id 25e337c89ae812b0ca1a179f3627fa7dceba1fad81b79c751fcba5d853427967 Nov 25 18:37:37 crc kubenswrapper[4926]: I1125 18:37:37.110479 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l54lw" event={"ID":"38d725d1-1001-42c4-9bba-202e0aa4d953","Type":"ContainerStarted","Data":"25e337c89ae812b0ca1a179f3627fa7dceba1fad81b79c751fcba5d853427967"} Nov 25 18:37:38 crc kubenswrapper[4926]: I1125 18:37:38.126750 4926 generic.go:334] "Generic (PLEG): container finished" podID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerID="0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab" exitCode=0 Nov 25 18:37:38 crc kubenswrapper[4926]: I1125 18:37:38.126811 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l54lw" event={"ID":"38d725d1-1001-42c4-9bba-202e0aa4d953","Type":"ContainerDied","Data":"0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab"} Nov 25 18:37:39 crc kubenswrapper[4926]: I1125 18:37:39.144580 4926 generic.go:334] "Generic (PLEG): container finished" podID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerID="60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17" exitCode=0 Nov 25 18:37:39 crc kubenswrapper[4926]: I1125 18:37:39.144649 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l54lw" event={"ID":"38d725d1-1001-42c4-9bba-202e0aa4d953","Type":"ContainerDied","Data":"60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17"} Nov 25 18:37:40 crc kubenswrapper[4926]: I1125 18:37:40.157677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l54lw" event={"ID":"38d725d1-1001-42c4-9bba-202e0aa4d953","Type":"ContainerStarted","Data":"3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032"} Nov 25 18:37:40 crc kubenswrapper[4926]: I1125 18:37:40.188912 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l54lw" podStartSLOduration=2.821238051 podStartE2EDuration="4.188889616s" podCreationTimestamp="2025-11-25 18:37:36 +0000 UTC" firstStartedPulling="2025-11-25 18:37:38.132820713 +0000 UTC m=+1488.518334348" lastFinishedPulling="2025-11-25 18:37:39.500472308 +0000 UTC m=+1489.885985913" observedRunningTime="2025-11-25 18:37:40.179778379 +0000 UTC m=+1490.565292004" 
watchObservedRunningTime="2025-11-25 18:37:40.188889616 +0000 UTC m=+1490.574403231" Nov 25 18:37:46 crc kubenswrapper[4926]: I1125 18:37:46.472868 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:46 crc kubenswrapper[4926]: I1125 18:37:46.473788 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:46 crc kubenswrapper[4926]: I1125 18:37:46.524349 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:47 crc kubenswrapper[4926]: I1125 18:37:47.317649 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:47 crc kubenswrapper[4926]: I1125 18:37:47.393953 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l54lw"] Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.262847 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l54lw" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="registry-server" containerID="cri-o://3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032" gracePeriod=2 Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.897636 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.960044 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-catalog-content\") pod \"38d725d1-1001-42c4-9bba-202e0aa4d953\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.960240 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwplq\" (UniqueName: \"kubernetes.io/projected/38d725d1-1001-42c4-9bba-202e0aa4d953-kube-api-access-wwplq\") pod \"38d725d1-1001-42c4-9bba-202e0aa4d953\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.961672 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-utilities\") pod \"38d725d1-1001-42c4-9bba-202e0aa4d953\" (UID: \"38d725d1-1001-42c4-9bba-202e0aa4d953\") " Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.962473 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-utilities" (OuterVolumeSpecName: "utilities") pod "38d725d1-1001-42c4-9bba-202e0aa4d953" (UID: "38d725d1-1001-42c4-9bba-202e0aa4d953"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.962870 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.967756 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38d725d1-1001-42c4-9bba-202e0aa4d953-kube-api-access-wwplq" (OuterVolumeSpecName: "kube-api-access-wwplq") pod "38d725d1-1001-42c4-9bba-202e0aa4d953" (UID: "38d725d1-1001-42c4-9bba-202e0aa4d953"). InnerVolumeSpecName "kube-api-access-wwplq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:37:49 crc kubenswrapper[4926]: I1125 18:37:49.975775 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "38d725d1-1001-42c4-9bba-202e0aa4d953" (UID: "38d725d1-1001-42c4-9bba-202e0aa4d953"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.065160 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38d725d1-1001-42c4-9bba-202e0aa4d953-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.065223 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwplq\" (UniqueName: \"kubernetes.io/projected/38d725d1-1001-42c4-9bba-202e0aa4d953-kube-api-access-wwplq\") on node \"crc\" DevicePath \"\"" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.308546 4926 generic.go:334] "Generic (PLEG): container finished" podID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerID="3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032" exitCode=0 Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.308600 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l54lw" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.308590 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l54lw" event={"ID":"38d725d1-1001-42c4-9bba-202e0aa4d953","Type":"ContainerDied","Data":"3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032"} Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.309737 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l54lw" event={"ID":"38d725d1-1001-42c4-9bba-202e0aa4d953","Type":"ContainerDied","Data":"25e337c89ae812b0ca1a179f3627fa7dceba1fad81b79c751fcba5d853427967"} Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.309791 4926 scope.go:117] "RemoveContainer" containerID="3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.330539 4926 scope.go:117] "RemoveContainer" containerID="60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.360741 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l54lw"] Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.369369 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l54lw"] Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.383454 4926 scope.go:117] "RemoveContainer" containerID="0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.411233 4926 scope.go:117] "RemoveContainer" containerID="3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032" Nov 25 18:37:50 crc kubenswrapper[4926]: E1125 18:37:50.411602 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032\": container with ID starting with 3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032 not found: ID does not exist" containerID="3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.411650 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032"} err="failed to get container status \"3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032\": rpc error: code = NotFound desc = could not find container \"3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032\": container with ID starting with 3a4f2d4844efa05b7cd7e889e414aceb33faa48ea22532bb811971045a0a1032 not found: ID does not exist" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.411679 4926 scope.go:117] "RemoveContainer" containerID="60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17" Nov 25 18:37:50 crc kubenswrapper[4926]: E1125 18:37:50.411990 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17\": container with ID starting with 60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17 not found: ID does not exist" containerID="60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.412050 4926 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17"} err="failed to get container status \"60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17\": rpc error: code = NotFound desc = could not find container \"60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17\": container with ID starting with 60e6851594d4060132d4ac372fd2497839ab8e61760024801ddce5ea722fcd17 not found: ID does not exist" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.412071 4926 scope.go:117] "RemoveContainer" containerID="0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab" Nov 25 18:37:50 crc kubenswrapper[4926]: E1125 18:37:50.412251 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab\": container with ID starting with 0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab not found: ID does not exist" containerID="0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab" Nov 25 18:37:50 crc kubenswrapper[4926]: I1125 18:37:50.412278 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab"} err="failed to get container status \"0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab\": rpc error: code = NotFound desc = could not find container \"0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab\": container with ID starting with 0ee46ab82e72642677845718a9a2b4325c67634b02f14d9411c434290ed9d4ab not found: ID does not exist" Nov 25 18:37:52 crc kubenswrapper[4926]: I1125 18:37:52.351765 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" path="/var/lib/kubelet/pods/38d725d1-1001-42c4-9bba-202e0aa4d953/volumes" Nov 25 18:38:00 crc kubenswrapper[4926]: E1125 18:38:00.345731 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38d725d1_1001_42c4_9bba_202e0aa4d953.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:38:03 crc kubenswrapper[4926]: I1125 18:38:03.541603 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:38:03 crc kubenswrapper[4926]: I1125 18:38:03.542210 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:38:10 crc kubenswrapper[4926]: E1125 18:38:10.664585 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38d725d1_1001_42c4_9bba_202e0aa4d953.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:38:19 crc kubenswrapper[4926]: I1125 18:38:19.436091 4926 scope.go:117] "RemoveContainer" 
containerID="90e3e242ecf190558022eeac560a84bddae81d66ba85e65e69044613f25474fd" Nov 25 18:38:19 crc kubenswrapper[4926]: I1125 18:38:19.487095 4926 scope.go:117] "RemoveContainer" containerID="04ecc17731a8f7003a6eeaa3ad4ab169febc4fffb25b0ad3ba7b22cc17c10607" Nov 25 18:38:19 crc kubenswrapper[4926]: I1125 18:38:19.531732 4926 scope.go:117] "RemoveContainer" containerID="021cbc7140a6306af030a2c0f70e9f62bf1407c823aa68b79b6f07528174129b" Nov 25 18:38:20 crc kubenswrapper[4926]: E1125 18:38:20.930869 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38d725d1_1001_42c4_9bba_202e0aa4d953.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:38:31 crc kubenswrapper[4926]: E1125 18:38:31.252734 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38d725d1_1001_42c4_9bba_202e0aa4d953.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:38:33 crc kubenswrapper[4926]: I1125 18:38:33.541838 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:38:33 crc kubenswrapper[4926]: I1125 18:38:33.542281 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:38:33 crc kubenswrapper[4926]: I1125 18:38:33.542355 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:38:33 crc kubenswrapper[4926]: I1125 18:38:33.543617 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:38:33 crc kubenswrapper[4926]: I1125 18:38:33.543742 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" gracePeriod=600 Nov 25 18:38:33 crc kubenswrapper[4926]: E1125 18:38:33.668645 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:38:34 crc kubenswrapper[4926]: I1125 18:38:34.186594 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" 
containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" exitCode=0 Nov 25 18:38:34 crc kubenswrapper[4926]: I1125 18:38:34.186640 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2"} Nov 25 18:38:34 crc kubenswrapper[4926]: I1125 18:38:34.186695 4926 scope.go:117] "RemoveContainer" containerID="a8f56314785fa968a3a105a23d5d2b50a67b5ca02a86eb00cd6083866de84208" Nov 25 18:38:34 crc kubenswrapper[4926]: I1125 18:38:34.187615 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:38:34 crc kubenswrapper[4926]: E1125 18:38:34.188126 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:38:41 crc kubenswrapper[4926]: E1125 18:38:41.545954 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38d725d1_1001_42c4_9bba_202e0aa4d953.slice\": RecentStats: unable to find data in memory cache]" Nov 25 18:38:47 crc kubenswrapper[4926]: I1125 18:38:47.329681 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:38:47 crc kubenswrapper[4926]: E1125 18:38:47.330758 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:39:02 crc kubenswrapper[4926]: I1125 18:39:02.328993 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:39:02 crc kubenswrapper[4926]: E1125 18:39:02.329771 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:39:15 crc kubenswrapper[4926]: I1125 18:39:15.328738 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:39:15 crc kubenswrapper[4926]: E1125 18:39:15.330753 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:39:26 crc kubenswrapper[4926]: I1125 18:39:26.330719 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:39:26 crc kubenswrapper[4926]: E1125 18:39:26.332099 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:39:41 crc kubenswrapper[4926]: I1125 18:39:41.330415 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:39:41 crc kubenswrapper[4926]: E1125 18:39:41.334949 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:39:52 crc kubenswrapper[4926]: I1125 18:39:52.329662 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:39:52 crc kubenswrapper[4926]: E1125 18:39:52.330771 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:40:05 crc kubenswrapper[4926]: I1125 18:40:05.329432 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:40:05 crc kubenswrapper[4926]: E1125 18:40:05.330898 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:40:16 crc kubenswrapper[4926]: I1125 18:40:16.329085 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:40:16 crc kubenswrapper[4926]: E1125 18:40:16.330097 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:40:19 crc kubenswrapper[4926]: I1125 18:40:19.669461 4926 
scope.go:117] "RemoveContainer" containerID="9464d7ea4e789c462aa6016cee375d0b2a8ea95abe50c5e091b8cb1fead34835" Nov 25 18:40:19 crc kubenswrapper[4926]: I1125 18:40:19.700607 4926 scope.go:117] "RemoveContainer" containerID="0f035496039235591f5f6165aa313cf746247336ff4bf426cea037b2b61907c9" Nov 25 18:40:19 crc kubenswrapper[4926]: I1125 18:40:19.731663 4926 scope.go:117] "RemoveContainer" containerID="4a2e95fa552a202c5233f34f707f0d179a9e6d9db8c07961aedbf885a279d23f" Nov 25 18:40:30 crc kubenswrapper[4926]: I1125 18:40:30.337583 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:40:30 crc kubenswrapper[4926]: E1125 18:40:30.338426 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.067432 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-vkrp7"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.084354 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6c14-account-create-update-jvtzw"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.094226 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-vkrp7"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.103198 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6c14-account-create-update-jvtzw"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.112124 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-8vpf5"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.120090 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-eebe-account-create-update-kwc6r"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.127723 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-8vpf5"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.135530 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-eebe-account-create-update-kwc6r"] Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.353197 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c2aa2ba-b60a-4fb5-9879-d825e4885c92" path="/var/lib/kubelet/pods/3c2aa2ba-b60a-4fb5-9879-d825e4885c92/volumes" Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.354518 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a669dd4-2149-4a89-bf40-6bbe36867139" path="/var/lib/kubelet/pods/5a669dd4-2149-4a89-bf40-6bbe36867139/volumes" Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.355906 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae582872-4d2a-423b-b1e4-96ae3533b44b" path="/var/lib/kubelet/pods/ae582872-4d2a-423b-b1e4-96ae3533b44b/volumes" Nov 25 18:40:36 crc kubenswrapper[4926]: I1125 18:40:36.357191 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9" path="/var/lib/kubelet/pods/d3cd10f1-1f3e-4aa1-904e-5daf3ac49ee9/volumes" Nov 25 18:40:38 crc kubenswrapper[4926]: I1125 
18:40:38.051579 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-r9q84"] Nov 25 18:40:38 crc kubenswrapper[4926]: I1125 18:40:38.072246 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-de89-account-create-update-shz6t"] Nov 25 18:40:38 crc kubenswrapper[4926]: I1125 18:40:38.083802 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-r9q84"] Nov 25 18:40:38 crc kubenswrapper[4926]: I1125 18:40:38.093896 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-de89-account-create-update-shz6t"] Nov 25 18:40:38 crc kubenswrapper[4926]: I1125 18:40:38.348421 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e59d2e8-b4ea-41c7-bab7-072e4090ccf6" path="/var/lib/kubelet/pods/0e59d2e8-b4ea-41c7-bab7-072e4090ccf6/volumes" Nov 25 18:40:38 crc kubenswrapper[4926]: I1125 18:40:38.349905 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d389a676-44e5-4209-a6a7-752a361595ad" path="/var/lib/kubelet/pods/d389a676-44e5-4209-a6a7-752a361595ad/volumes" Nov 25 18:40:41 crc kubenswrapper[4926]: I1125 18:40:41.330161 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:40:41 crc kubenswrapper[4926]: E1125 18:40:41.331057 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:40:54 crc kubenswrapper[4926]: I1125 18:40:54.330612 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:40:54 crc kubenswrapper[4926]: E1125 18:40:54.331469 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:41:07 crc kubenswrapper[4926]: I1125 18:41:07.329212 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:41:07 crc kubenswrapper[4926]: E1125 18:41:07.330301 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.061618 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-64pj4"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.071547 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-b0e5-account-create-update-fbpdd"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.082237 4926 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fa53-account-create-update-pv9jl"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.090396 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-jjkj2"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.097268 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-b0e5-account-create-update-fbpdd"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.106314 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-fa53-account-create-update-pv9jl"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.113739 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-jjkj2"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.121425 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-64pj4"] Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.368008 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d88d8c5-1bd2-4c14-9fe0-516455a79891" path="/var/lib/kubelet/pods/0d88d8c5-1bd2-4c14-9fe0-516455a79891/volumes" Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.369894 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d129a26-451d-4572-8161-18fc1c5be7dd" path="/var/lib/kubelet/pods/8d129a26-451d-4572-8161-18fc1c5be7dd/volumes" Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.371634 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="985212cb-7a46-4ade-b877-cb67fd5ebf66" path="/var/lib/kubelet/pods/985212cb-7a46-4ade-b877-cb67fd5ebf66/volumes" Nov 25 18:41:12 crc kubenswrapper[4926]: I1125 18:41:12.373318 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca121447-f823-4aa7-b9ad-d3d8bd7d92bc" path="/var/lib/kubelet/pods/ca121447-f823-4aa7-b9ad-d3d8bd7d92bc/volumes" Nov 25 18:41:14 crc kubenswrapper[4926]: I1125 18:41:14.746175 4926 generic.go:334] "Generic (PLEG): container finished" podID="c38a7543-0881-45e4-b1b3-1c515379526a" containerID="1409c45bcbf4380f04674ec0476b93e87ef8b18e6cfb4bff4c53f6cbc0ec8507" exitCode=0 Nov 25 18:41:14 crc kubenswrapper[4926]: I1125 18:41:14.746403 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" event={"ID":"c38a7543-0881-45e4-b1b3-1c515379526a","Type":"ContainerDied","Data":"1409c45bcbf4380f04674ec0476b93e87ef8b18e6cfb4bff4c53f6cbc0ec8507"} Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.047562 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-586d-account-create-update-wqnvl"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.054835 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-586d-account-create-update-wqnvl"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.066642 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-4wq2h"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.075682 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-jncjg"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.100782 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-9639-account-create-update-l4sqt"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.110510 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-jncjg"] Nov 25 18:41:16 crc 
kubenswrapper[4926]: I1125 18:41:16.125069 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-9639-account-create-update-l4sqt"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.141365 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-4wq2h"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.232612 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.318990 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-ssh-key\") pod \"c38a7543-0881-45e4-b1b3-1c515379526a\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.319090 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdf8r\" (UniqueName: \"kubernetes.io/projected/c38a7543-0881-45e4-b1b3-1c515379526a-kube-api-access-vdf8r\") pod \"c38a7543-0881-45e4-b1b3-1c515379526a\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.319142 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-inventory\") pod \"c38a7543-0881-45e4-b1b3-1c515379526a\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.319221 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-bootstrap-combined-ca-bundle\") pod \"c38a7543-0881-45e4-b1b3-1c515379526a\" (UID: \"c38a7543-0881-45e4-b1b3-1c515379526a\") " Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.324765 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c38a7543-0881-45e4-b1b3-1c515379526a-kube-api-access-vdf8r" (OuterVolumeSpecName: "kube-api-access-vdf8r") pod "c38a7543-0881-45e4-b1b3-1c515379526a" (UID: "c38a7543-0881-45e4-b1b3-1c515379526a"). InnerVolumeSpecName "kube-api-access-vdf8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.326919 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "c38a7543-0881-45e4-b1b3-1c515379526a" (UID: "c38a7543-0881-45e4-b1b3-1c515379526a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.350626 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-inventory" (OuterVolumeSpecName: "inventory") pod "c38a7543-0881-45e4-b1b3-1c515379526a" (UID: "c38a7543-0881-45e4-b1b3-1c515379526a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.352579 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c8d247f-750f-432d-8c0e-8e5c87cca18e" path="/var/lib/kubelet/pods/3c8d247f-750f-432d-8c0e-8e5c87cca18e/volumes" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.353625 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2" path="/var/lib/kubelet/pods/5d19bb34-e312-4c13-9bbb-feb6cd4c7cc2/volumes" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.354217 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eb1ce46-a836-47f2-b91a-99161a7e66cd" path="/var/lib/kubelet/pods/6eb1ce46-a836-47f2-b91a-99161a7e66cd/volumes" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.354855 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6" path="/var/lib/kubelet/pods/ba8ba4ab-f0d8-4f8e-9a8e-d3bcb07b54f6/volumes" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.357531 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c38a7543-0881-45e4-b1b3-1c515379526a" (UID: "c38a7543-0881-45e4-b1b3-1c515379526a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.422972 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.423016 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdf8r\" (UniqueName: \"kubernetes.io/projected/c38a7543-0881-45e4-b1b3-1c515379526a-kube-api-access-vdf8r\") on node \"crc\" DevicePath \"\"" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.423031 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.423045 4926 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38a7543-0881-45e4-b1b3-1c515379526a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.771631 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" event={"ID":"c38a7543-0881-45e4-b1b3-1c515379526a","Type":"ContainerDied","Data":"13056d66996669334205b29d51eee432f0fda3eafbe975211c0b19997411e296"} Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.771683 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13056d66996669334205b29d51eee432f0fda3eafbe975211c0b19997411e296" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.771754 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.901773 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv"] Nov 25 18:41:16 crc kubenswrapper[4926]: E1125 18:41:16.902160 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="extract-utilities" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.902176 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="extract-utilities" Nov 25 18:41:16 crc kubenswrapper[4926]: E1125 18:41:16.902197 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38a7543-0881-45e4-b1b3-1c515379526a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.902204 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38a7543-0881-45e4-b1b3-1c515379526a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 18:41:16 crc kubenswrapper[4926]: E1125 18:41:16.902218 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="extract-content" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.902224 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="extract-content" Nov 25 18:41:16 crc kubenswrapper[4926]: E1125 18:41:16.902242 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="registry-server" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.902248 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="registry-server" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.902428 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="38d725d1-1001-42c4-9bba-202e0aa4d953" containerName="registry-server" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.902457 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38a7543-0881-45e4-b1b3-1c515379526a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.903120 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.905777 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.906153 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.906418 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.918530 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv"] Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.919013 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.934425 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfr6z\" (UniqueName: \"kubernetes.io/projected/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-kube-api-access-hfr6z\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.934475 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:16 crc kubenswrapper[4926]: I1125 18:41:16.934516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.036511 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfr6z\" (UniqueName: \"kubernetes.io/projected/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-kube-api-access-hfr6z\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.036784 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.036880 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.050130 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.051971 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.066503 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfr6z\" (UniqueName: \"kubernetes.io/projected/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-kube-api-access-hfr6z\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.275367 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.852905 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv"] Nov 25 18:41:17 crc kubenswrapper[4926]: I1125 18:41:17.863610 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:41:18 crc kubenswrapper[4926]: I1125 18:41:18.797073 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" event={"ID":"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13","Type":"ContainerStarted","Data":"83c464cbd683b9783baf021b3ddf39729b9452e123e9638e51576bf677a40c71"} Nov 25 18:41:19 crc kubenswrapper[4926]: I1125 18:41:19.806410 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" event={"ID":"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13","Type":"ContainerStarted","Data":"d375e0cfc01eb31e0beb61dd692ba23ed66434ec2e471bab7b10351cc6df3ac2"} Nov 25 18:41:19 crc kubenswrapper[4926]: I1125 18:41:19.824829 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" podStartSLOduration=3.1068602849999998 podStartE2EDuration="3.824806354s" podCreationTimestamp="2025-11-25 18:41:16 +0000 UTC" firstStartedPulling="2025-11-25 18:41:17.863197335 +0000 UTC m=+1708.248710940" lastFinishedPulling="2025-11-25 18:41:18.581143384 +0000 UTC m=+1708.966657009" observedRunningTime="2025-11-25 18:41:19.821443694 +0000 UTC m=+1710.206957319" watchObservedRunningTime="2025-11-25 18:41:19.824806354 +0000 UTC m=+1710.210319959" Nov 25 18:41:19 crc kubenswrapper[4926]: I1125 18:41:19.849435 4926 scope.go:117] "RemoveContainer" 
containerID="1bedda484988ded2f362cf0e33516ba0ebc1e08f57acb7dbfaa81fa3e3ce8e94" Nov 25 18:41:19 crc kubenswrapper[4926]: I1125 18:41:19.875569 4926 scope.go:117] "RemoveContainer" containerID="0a836c7110c8b3d1dbc0e243ec3e857f0f246dd2465d16af1402ca1fc8c5e6a4" Nov 25 18:41:19 crc kubenswrapper[4926]: I1125 18:41:19.937240 4926 scope.go:117] "RemoveContainer" containerID="71a79cba2f8eb38065b3fc1f681718e85f2cd5c82ff496380a84358f54b72c2b" Nov 25 18:41:19 crc kubenswrapper[4926]: I1125 18:41:19.993674 4926 scope.go:117] "RemoveContainer" containerID="2062694dcc94a5dc24b7487b7cc566c9d3d1879cae1f70927b28183920fc0551" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.053684 4926 scope.go:117] "RemoveContainer" containerID="d04b8cec76de6e94aef1595226a3c2cb4e58304166c64c7ac3f11bbc6e855294" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.097307 4926 scope.go:117] "RemoveContainer" containerID="49a769390433ea53221af2a300b25a993c04aaf325463920a04467a8c7331611" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.139017 4926 scope.go:117] "RemoveContainer" containerID="5415f1164bfc2688bdafc9692d2efdf755869afa8b0a38fa316e7eb172a41a87" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.164196 4926 scope.go:117] "RemoveContainer" containerID="421c2b3b9b85603d26a2c16c32a7413ac0e61b5a31fa238f4bcdfead1c0d26de" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.185106 4926 scope.go:117] "RemoveContainer" containerID="394cfee1be3b0cee00702e495ad12294915035ca76e1f4f40e977bf45f0a208c" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.207547 4926 scope.go:117] "RemoveContainer" containerID="9ed5f7826121be6ef64d31324c9f90d81e46ac1093557555b93d14a7ab18d815" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.236892 4926 scope.go:117] "RemoveContainer" containerID="442df9e1b8d0d782a4bb16986ea16bb353f0fdb7a1fad932a233effc96931c3b" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.257209 4926 scope.go:117] "RemoveContainer" containerID="2efc7517b01f1cd82ba40717fbe0c947cce725478ef1f7d7d54be3f470c1e1bf" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.285103 4926 scope.go:117] "RemoveContainer" containerID="7bd2107ab042b4350792da30839205b003c53faf5f72f8207d11a4ff6f3eb4ec" Nov 25 18:41:20 crc kubenswrapper[4926]: I1125 18:41:20.308061 4926 scope.go:117] "RemoveContainer" containerID="8ed37712c887ccf88b76b6bf6a6a603208689a11b072f05093e76aad50e444b1" Nov 25 18:41:21 crc kubenswrapper[4926]: I1125 18:41:21.329084 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:41:21 crc kubenswrapper[4926]: E1125 18:41:21.329619 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.044054 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-gczcd"] Nov 25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.060737 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-zdqbf"] Nov 25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.071173 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-gczcd"] Nov 
25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.080326 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-zdqbf"] Nov 25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.330046 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:41:34 crc kubenswrapper[4926]: E1125 18:41:34.330522 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.344579 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90a8c684-648e-4486-8da0-ff997b994626" path="/var/lib/kubelet/pods/90a8c684-648e-4486-8da0-ff997b994626/volumes" Nov 25 18:41:34 crc kubenswrapper[4926]: I1125 18:41:34.345151 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9865625-980a-4b3e-bb1e-53d5223db907" path="/var/lib/kubelet/pods/f9865625-980a-4b3e-bb1e-53d5223db907/volumes" Nov 25 18:41:45 crc kubenswrapper[4926]: I1125 18:41:45.329141 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:41:45 crc kubenswrapper[4926]: E1125 18:41:45.330424 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:41:57 crc kubenswrapper[4926]: I1125 18:41:57.329991 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:41:57 crc kubenswrapper[4926]: E1125 18:41:57.331257 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:42:08 crc kubenswrapper[4926]: I1125 18:42:08.329763 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:42:08 crc kubenswrapper[4926]: E1125 18:42:08.330725 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:42:10 crc kubenswrapper[4926]: I1125 18:42:10.071538 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-wvbc9"] Nov 25 18:42:10 crc kubenswrapper[4926]: I1125 
18:42:10.086118 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-wvbc9"] Nov 25 18:42:10 crc kubenswrapper[4926]: I1125 18:42:10.352031 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e56a5d23-9046-44fb-b484-19e044ee5ab7" path="/var/lib/kubelet/pods/e56a5d23-9046-44fb-b484-19e044ee5ab7/volumes" Nov 25 18:42:20 crc kubenswrapper[4926]: I1125 18:42:20.345945 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:42:20 crc kubenswrapper[4926]: E1125 18:42:20.346967 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:42:20 crc kubenswrapper[4926]: I1125 18:42:20.602964 4926 scope.go:117] "RemoveContainer" containerID="edb0ac343c5bfb9b8d22be56c5801ed4c081200df785481f097931f0dd999f3c" Nov 25 18:42:20 crc kubenswrapper[4926]: I1125 18:42:20.653989 4926 scope.go:117] "RemoveContainer" containerID="dd2bd06ac63740b7822ef7b91ae4f33fb58241c0f64ff881d8b8d807092bb571" Nov 25 18:42:20 crc kubenswrapper[4926]: I1125 18:42:20.731858 4926 scope.go:117] "RemoveContainer" containerID="5db9a216fd2d187084d817e9246e63b11e0fb7838f40d5e4d3733590a9bf4e1f" Nov 25 18:42:25 crc kubenswrapper[4926]: I1125 18:42:25.041361 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-52ncr"] Nov 25 18:42:25 crc kubenswrapper[4926]: I1125 18:42:25.048831 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-pvqdw"] Nov 25 18:42:25 crc kubenswrapper[4926]: I1125 18:42:25.056342 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-52ncr"] Nov 25 18:42:25 crc kubenswrapper[4926]: I1125 18:42:25.064460 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-pvqdw"] Nov 25 18:42:26 crc kubenswrapper[4926]: I1125 18:42:26.350067 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4addbd96-e765-4c9c-b260-5a80700849d2" path="/var/lib/kubelet/pods/4addbd96-e765-4c9c-b260-5a80700849d2/volumes" Nov 25 18:42:26 crc kubenswrapper[4926]: I1125 18:42:26.352115 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2205cef-0292-4e62-b282-0acd7f50f920" path="/var/lib/kubelet/pods/f2205cef-0292-4e62-b282-0acd7f50f920/volumes" Nov 25 18:42:31 crc kubenswrapper[4926]: I1125 18:42:31.329872 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:42:31 crc kubenswrapper[4926]: E1125 18:42:31.330665 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:42:34 crc kubenswrapper[4926]: I1125 18:42:34.044865 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-stgf4"] Nov 25 18:42:34 crc 
kubenswrapper[4926]: I1125 18:42:34.058683 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-stgf4"] Nov 25 18:42:34 crc kubenswrapper[4926]: I1125 18:42:34.349835 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="434bea04-3768-493f-8d01-36f9c41bc811" path="/var/lib/kubelet/pods/434bea04-3768-493f-8d01-36f9c41bc811/volumes" Nov 25 18:42:42 crc kubenswrapper[4926]: I1125 18:42:42.060999 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-x8d9g"] Nov 25 18:42:42 crc kubenswrapper[4926]: I1125 18:42:42.073672 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-x8d9g"] Nov 25 18:42:42 crc kubenswrapper[4926]: I1125 18:42:42.330503 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:42:42 crc kubenswrapper[4926]: E1125 18:42:42.330959 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:42:42 crc kubenswrapper[4926]: I1125 18:42:42.349741 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="017caf97-9f18-49b8-b6e6-597c709e3420" path="/var/lib/kubelet/pods/017caf97-9f18-49b8-b6e6-597c709e3420/volumes" Nov 25 18:42:54 crc kubenswrapper[4926]: I1125 18:42:54.052397 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-jnbt6"] Nov 25 18:42:54 crc kubenswrapper[4926]: I1125 18:42:54.070491 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-jnbt6"] Nov 25 18:42:54 crc kubenswrapper[4926]: I1125 18:42:54.350362 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0138f286-e018-42de-b145-2cda09144394" path="/var/lib/kubelet/pods/0138f286-e018-42de-b145-2cda09144394/volumes" Nov 25 18:42:55 crc kubenswrapper[4926]: I1125 18:42:55.329423 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:42:55 crc kubenswrapper[4926]: E1125 18:42:55.329945 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:43:06 crc kubenswrapper[4926]: I1125 18:43:06.330561 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:43:06 crc kubenswrapper[4926]: E1125 18:43:06.331879 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:43:08 crc 
kubenswrapper[4926]: I1125 18:43:08.077404 4926 generic.go:334] "Generic (PLEG): container finished" podID="aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" containerID="d375e0cfc01eb31e0beb61dd692ba23ed66434ec2e471bab7b10351cc6df3ac2" exitCode=0 Nov 25 18:43:08 crc kubenswrapper[4926]: I1125 18:43:08.077522 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" event={"ID":"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13","Type":"ContainerDied","Data":"d375e0cfc01eb31e0beb61dd692ba23ed66434ec2e471bab7b10351cc6df3ac2"} Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.660419 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.781160 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfr6z\" (UniqueName: \"kubernetes.io/projected/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-kube-api-access-hfr6z\") pod \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.781305 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-inventory\") pod \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.781354 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-ssh-key\") pod \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\" (UID: \"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13\") " Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.788038 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-kube-api-access-hfr6z" (OuterVolumeSpecName: "kube-api-access-hfr6z") pod "aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" (UID: "aff27f8d-6ea3-4441-b1a2-f9a87fdaab13"). InnerVolumeSpecName "kube-api-access-hfr6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.810900 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-inventory" (OuterVolumeSpecName: "inventory") pod "aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" (UID: "aff27f8d-6ea3-4441-b1a2-f9a87fdaab13"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.841600 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" (UID: "aff27f8d-6ea3-4441-b1a2-f9a87fdaab13"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.883713 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfr6z\" (UniqueName: \"kubernetes.io/projected/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-kube-api-access-hfr6z\") on node \"crc\" DevicePath \"\"" Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.883744 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:43:09 crc kubenswrapper[4926]: I1125 18:43:09.883753 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aff27f8d-6ea3-4441-b1a2-f9a87fdaab13-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.103085 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" event={"ID":"aff27f8d-6ea3-4441-b1a2-f9a87fdaab13","Type":"ContainerDied","Data":"83c464cbd683b9783baf021b3ddf39729b9452e123e9638e51576bf677a40c71"} Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.103131 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83c464cbd683b9783baf021b3ddf39729b9452e123e9638e51576bf677a40c71" Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.103201 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv" Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.213342 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"] Nov 25 18:43:10 crc kubenswrapper[4926]: E1125 18:43:10.214225 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.214365 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.215146 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="aff27f8d-6ea3-4441-b1a2-f9a87fdaab13" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.217455 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.220996 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.221283 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.221578 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.225396 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"]
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.228964 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.292815 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm7w4\" (UniqueName: \"kubernetes.io/projected/aeee2488-6e27-4b22-aa01-182c3c7429fe-kube-api-access-vm7w4\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.292888 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.293081 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.395459 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.395691 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm7w4\" (UniqueName: \"kubernetes.io/projected/aeee2488-6e27-4b22-aa01-182c3c7429fe-kube-api-access-vm7w4\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.395793 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.403272 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.403900 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.417705 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm7w4\" (UniqueName: \"kubernetes.io/projected/aeee2488-6e27-4b22-aa01-182c3c7429fe-kube-api-access-vm7w4\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:10 crc kubenswrapper[4926]: I1125 18:43:10.560320 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:43:11 crc kubenswrapper[4926]: I1125 18:43:11.238113 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"]
Nov 25 18:43:12 crc kubenswrapper[4926]: I1125 18:43:12.133223 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s" event={"ID":"aeee2488-6e27-4b22-aa01-182c3c7429fe","Type":"ContainerStarted","Data":"7be39e7e47bbca5e77d2917ef7be3e81057a8cadb00536b006fb92b3d9601463"}
Nov 25 18:43:12 crc kubenswrapper[4926]: I1125 18:43:12.133529 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s" event={"ID":"aeee2488-6e27-4b22-aa01-182c3c7429fe","Type":"ContainerStarted","Data":"9f09e3c5fd0aebfe8462551bf43a443399dc99c9112303dfb347e2d1da08f71a"}
Nov 25 18:43:12 crc kubenswrapper[4926]: I1125 18:43:12.148585 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s" podStartSLOduration=1.721013897 podStartE2EDuration="2.148566562s" podCreationTimestamp="2025-11-25 18:43:10 +0000 UTC" firstStartedPulling="2025-11-25 18:43:11.24472967 +0000 UTC m=+1821.630243265" lastFinishedPulling="2025-11-25 18:43:11.672282325 +0000 UTC m=+1822.057795930" observedRunningTime="2025-11-25 18:43:12.145032104 +0000 UTC m=+1822.530545709" watchObservedRunningTime="2025-11-25 18:43:12.148566562 +0000 UTC m=+1822.534080167"
Nov 25 18:43:18 crc kubenswrapper[4926]: I1125 18:43:18.335484 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2"
Nov 25 18:43:18 crc kubenswrapper[4926]: E1125 18:43:18.336846 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 18:43:20 crc kubenswrapper[4926]: I1125 18:43:20.852118 4926 scope.go:117] "RemoveContainer" containerID="c532dd2be7b122443980153da9bb60983ebf9d3d9ac7994e8762e25ecbf02b10"
Nov 25 18:43:20 crc kubenswrapper[4926]: I1125 18:43:20.893356 4926 scope.go:117] "RemoveContainer" containerID="ceac441c09b1ab45b04be4d908d2fa69eb4a3016e0432b031f87cc78b7d3ce19"
Nov 25 18:43:20 crc kubenswrapper[4926]: I1125 18:43:20.960950 4926 scope.go:117] "RemoveContainer" containerID="3f0799115ea5976d342803d3ec287b7e11216070aa59088280c0f0799da32aef"
Nov 25 18:43:21 crc kubenswrapper[4926]: I1125 18:43:21.036190 4926 scope.go:117] "RemoveContainer" containerID="520ea0e9d8dd3748068d6b0c4b1f21c3312cb88dd7caa9da843d0bd092df244a"
Nov 25 18:43:21 crc kubenswrapper[4926]: I1125 18:43:21.089053 4926 scope.go:117] "RemoveContainer" containerID="7b0eb78ad49b0f6308da6f264521dae46391514392d5ea5580ac14a62c0fd47f"
Nov 25 18:43:22 crc kubenswrapper[4926]: I1125 18:43:22.053789 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-fm7m4"]
Nov 25 18:43:22 crc kubenswrapper[4926]: I1125 18:43:22.069311 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cda2-account-create-update-njssm"]
Nov 25 18:43:22 crc kubenswrapper[4926]: I1125 18:43:22.078304 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-fm7m4"]
Nov 25 18:43:22 crc kubenswrapper[4926]: I1125 18:43:22.085235 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cda2-account-create-update-njssm"]
Nov 25 18:43:22 crc kubenswrapper[4926]: I1125 18:43:22.342565 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fb216ae-643a-47e9-9dce-b3f13e633e95" path="/var/lib/kubelet/pods/8fb216ae-643a-47e9-9dce-b3f13e633e95/volumes"
Nov 25 18:43:22 crc kubenswrapper[4926]: I1125 18:43:22.343105 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae1b2a5c-29d8-42a3-aedc-eb296da03c2d" path="/var/lib/kubelet/pods/ae1b2a5c-29d8-42a3-aedc-eb296da03c2d/volumes"
Nov 25 18:43:23 crc kubenswrapper[4926]: I1125 18:43:23.041336 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-a61c-account-create-update-ntxbc"]
Nov 25 18:43:23 crc kubenswrapper[4926]: I1125 18:43:23.049647 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-a61c-account-create-update-ntxbc"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.074058 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-ldk4s"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.089920 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-2sw5v"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.103156 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-2sw5v"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.114027 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-ldk4s"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.121629 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ca6b-account-create-update-9rj6z"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.132394 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ca6b-account-create-update-9rj6z"]
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.356802 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46c8d0a4-afda-4df7-83f5-bfb0e46139cf" path="/var/lib/kubelet/pods/46c8d0a4-afda-4df7-83f5-bfb0e46139cf/volumes"
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.358761 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68f7c172-f0a2-43f2-b4bc-5be36758ea34" path="/var/lib/kubelet/pods/68f7c172-f0a2-43f2-b4bc-5be36758ea34/volumes"
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.359963 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79206e63-62c7-45e0-93f3-f30a28d822bb" path="/var/lib/kubelet/pods/79206e63-62c7-45e0-93f3-f30a28d822bb/volumes"
Nov 25 18:43:24 crc kubenswrapper[4926]: I1125 18:43:24.361097 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0" path="/var/lib/kubelet/pods/aeb7dcb2-1a70-4226-bbb5-6b1f111a25b0/volumes"
Nov 25 18:43:33 crc kubenswrapper[4926]: I1125 18:43:33.330977 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2"
Nov 25 18:43:33 crc kubenswrapper[4926]: E1125 18:43:33.334166 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 18:43:46 crc kubenswrapper[4926]: I1125 18:43:46.329668 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2"
Nov 25 18:43:47 crc kubenswrapper[4926]: I1125 18:43:47.534639 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"382b7da6d0bccad494a8a3403e357e319f9afc1dcce63b01d2f6de71ba19c951"}
Nov 25 18:43:56 crc kubenswrapper[4926]: I1125 18:43:56.045012 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nxqr8"]
Nov 25 18:43:56 crc kubenswrapper[4926]: I1125 18:43:56.059690 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nxqr8"]
Nov 25 18:43:56 crc kubenswrapper[4926]: I1125 18:43:56.343701 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6986f889-6366-45eb-8f6a-e52588461c3e" path="/var/lib/kubelet/pods/6986f889-6366-45eb-8f6a-e52588461c3e/volumes"
Nov 25 18:44:16 crc kubenswrapper[4926]: I1125 18:44:16.053845 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-8ngjs"]
Nov 25 18:44:16 crc kubenswrapper[4926]: I1125 18:44:16.063032 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-8ngjs"]
Nov 25 18:44:16 crc kubenswrapper[4926]: I1125 18:44:16.346478 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb9087b2-2d7f-4014-85f0-aaac4431a44f" path="/var/lib/kubelet/pods/eb9087b2-2d7f-4014-85f0-aaac4431a44f/volumes"
Nov 25 18:44:19 crc kubenswrapper[4926]: I1125 18:44:19.032030 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f78p6"]
Nov 25 18:44:19 crc kubenswrapper[4926]: I1125 18:44:19.039271 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f78p6"]
Nov 25 18:44:20 crc kubenswrapper[4926]: I1125 18:44:20.353636 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9b169a7-a3f6-4b73-a317-7f9ab22625ad" path="/var/lib/kubelet/pods/b9b169a7-a3f6-4b73-a317-7f9ab22625ad/volumes"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.241996 4926 scope.go:117] "RemoveContainer" containerID="4e87aad84ec26afd36c0bba5c402fb5a163f9511a289b417105126661b4feeb4"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.289919 4926 scope.go:117] "RemoveContainer" containerID="2e114e6cf51eddfab79d846bda93db3dd4e9df7d4df99254d7315a73c35a1bb5"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.318846 4926 scope.go:117] "RemoveContainer" containerID="167a96f77e78d17987983c6d21fc209c668b8d6c21bbc3bb084514d729b4c871"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.373885 4926 scope.go:117] "RemoveContainer" containerID="92f8954e0f37e3d6cbb031ef8a8fb3701eb6fb321795f34c5ba132c7afafc824"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.439467 4926 scope.go:117] "RemoveContainer" containerID="a4d39959a369c04fdb1ca840e5d9abeeb2630a9af5b3f0d60b29451eef90549c"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.490485 4926 scope.go:117] "RemoveContainer" containerID="8ada8dfca4122a68a26eab3b2528f50d4151e5f176da9b7e3d0b762df6b777bf"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.520145 4926 scope.go:117] "RemoveContainer" containerID="df7a15ec5fbc6c93d8391912c0832ea1c8d22088a01fd7c658fa1f7b298090cc"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.559637 4926 scope.go:117] "RemoveContainer" containerID="bb1a43ff5f94f6f91bcdf3f12ffcf59bf4c7d3cfc277fbc8ff40cf19fd56eaa1"
Nov 25 18:44:21 crc kubenswrapper[4926]: I1125 18:44:21.596685 4926 scope.go:117] "RemoveContainer" containerID="514a25c0dc7dec4829311eed50cd9a6abe27efd9f7bace46ea9fdd86b82befe2"
Nov 25 18:44:32 crc kubenswrapper[4926]: I1125 18:44:32.036339 4926 generic.go:334] "Generic (PLEG): container finished" podID="aeee2488-6e27-4b22-aa01-182c3c7429fe" containerID="7be39e7e47bbca5e77d2917ef7be3e81057a8cadb00536b006fb92b3d9601463" exitCode=0
Nov 25 18:44:32 crc kubenswrapper[4926]: I1125 18:44:32.036414 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s" event={"ID":"aeee2488-6e27-4b22-aa01-182c3c7429fe","Type":"ContainerDied","Data":"7be39e7e47bbca5e77d2917ef7be3e81057a8cadb00536b006fb92b3d9601463"}
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.467011 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.563289 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-inventory\") pod \"aeee2488-6e27-4b22-aa01-182c3c7429fe\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") "
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.563398 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm7w4\" (UniqueName: \"kubernetes.io/projected/aeee2488-6e27-4b22-aa01-182c3c7429fe-kube-api-access-vm7w4\") pod \"aeee2488-6e27-4b22-aa01-182c3c7429fe\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") "
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.563464 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-ssh-key\") pod \"aeee2488-6e27-4b22-aa01-182c3c7429fe\" (UID: \"aeee2488-6e27-4b22-aa01-182c3c7429fe\") "
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.569121 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aeee2488-6e27-4b22-aa01-182c3c7429fe-kube-api-access-vm7w4" (OuterVolumeSpecName: "kube-api-access-vm7w4") pod "aeee2488-6e27-4b22-aa01-182c3c7429fe" (UID: "aeee2488-6e27-4b22-aa01-182c3c7429fe"). InnerVolumeSpecName "kube-api-access-vm7w4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.591016 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "aeee2488-6e27-4b22-aa01-182c3c7429fe" (UID: "aeee2488-6e27-4b22-aa01-182c3c7429fe"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.600353 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-inventory" (OuterVolumeSpecName: "inventory") pod "aeee2488-6e27-4b22-aa01-182c3c7429fe" (UID: "aeee2488-6e27-4b22-aa01-182c3c7429fe"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.666854 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.667102 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm7w4\" (UniqueName: \"kubernetes.io/projected/aeee2488-6e27-4b22-aa01-182c3c7429fe-kube-api-access-vm7w4\") on node \"crc\" DevicePath \"\""
Nov 25 18:44:33 crc kubenswrapper[4926]: I1125 18:44:33.667243 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aeee2488-6e27-4b22-aa01-182c3c7429fe-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.062936 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s" event={"ID":"aeee2488-6e27-4b22-aa01-182c3c7429fe","Type":"ContainerDied","Data":"9f09e3c5fd0aebfe8462551bf43a443399dc99c9112303dfb347e2d1da08f71a"}
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.063229 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f09e3c5fd0aebfe8462551bf43a443399dc99c9112303dfb347e2d1da08f71a"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.063037 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.171329 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"]
Nov 25 18:44:34 crc kubenswrapper[4926]: E1125 18:44:34.172612 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aeee2488-6e27-4b22-aa01-182c3c7429fe" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.172657 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="aeee2488-6e27-4b22-aa01-182c3c7429fe" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.173182 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="aeee2488-6e27-4b22-aa01-182c3c7429fe" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.174843 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.178628 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.178934 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.179807 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.180517 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.184513 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"]
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.282442 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.282475 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.282536 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hvqt\" (UniqueName: \"kubernetes.io/projected/9a577401-9bfd-42a3-82f9-185a545b6d3b-kube-api-access-9hvqt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.385172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.385240 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.385450 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hvqt\" (UniqueName: \"kubernetes.io/projected/9a577401-9bfd-42a3-82f9-185a545b6d3b-kube-api-access-9hvqt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.392142 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.392359 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.403266 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hvqt\" (UniqueName: \"kubernetes.io/projected/9a577401-9bfd-42a3-82f9-185a545b6d3b-kube-api-access-9hvqt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:34 crc kubenswrapper[4926]: I1125 18:44:34.516196 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:35 crc kubenswrapper[4926]: I1125 18:44:35.085640 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"]
Nov 25 18:44:36 crc kubenswrapper[4926]: I1125 18:44:36.085804 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k" event={"ID":"9a577401-9bfd-42a3-82f9-185a545b6d3b","Type":"ContainerStarted","Data":"8279c7a825c3e4a1c3b4bbc356d826e2e6bcd7ae8297d148d008848f18194f2d"}
Nov 25 18:44:36 crc kubenswrapper[4926]: I1125 18:44:36.086354 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k" event={"ID":"9a577401-9bfd-42a3-82f9-185a545b6d3b","Type":"ContainerStarted","Data":"182b0fd6f2f98e9de469e598a12f74f5ccfc466e93a33c98b09337cb69d57232"}
Nov 25 18:44:36 crc kubenswrapper[4926]: I1125 18:44:36.106139 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k" podStartSLOduration=1.6227292530000001 podStartE2EDuration="2.106109662s" podCreationTimestamp="2025-11-25 18:44:34 +0000 UTC" firstStartedPulling="2025-11-25 18:44:35.09071196 +0000 UTC m=+1905.476225565" lastFinishedPulling="2025-11-25 18:44:35.574092369 +0000 UTC m=+1905.959605974" observedRunningTime="2025-11-25 18:44:36.104845422 +0000 UTC m=+1906.490359047" watchObservedRunningTime="2025-11-25 18:44:36.106109662 +0000 UTC m=+1906.491623307"
Nov 25 18:44:41 crc kubenswrapper[4926]: I1125 18:44:41.152619 4926 generic.go:334] "Generic (PLEG): container finished" podID="9a577401-9bfd-42a3-82f9-185a545b6d3b" containerID="8279c7a825c3e4a1c3b4bbc356d826e2e6bcd7ae8297d148d008848f18194f2d" exitCode=0
Nov 25 18:44:41 crc kubenswrapper[4926]: I1125 18:44:41.152728 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k" event={"ID":"9a577401-9bfd-42a3-82f9-185a545b6d3b","Type":"ContainerDied","Data":"8279c7a825c3e4a1c3b4bbc356d826e2e6bcd7ae8297d148d008848f18194f2d"}
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.720085 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.786593 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-inventory\") pod \"9a577401-9bfd-42a3-82f9-185a545b6d3b\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") "
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.786673 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hvqt\" (UniqueName: \"kubernetes.io/projected/9a577401-9bfd-42a3-82f9-185a545b6d3b-kube-api-access-9hvqt\") pod \"9a577401-9bfd-42a3-82f9-185a545b6d3b\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") "
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.786917 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-ssh-key\") pod \"9a577401-9bfd-42a3-82f9-185a545b6d3b\" (UID: \"9a577401-9bfd-42a3-82f9-185a545b6d3b\") "
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.791858 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a577401-9bfd-42a3-82f9-185a545b6d3b-kube-api-access-9hvqt" (OuterVolumeSpecName: "kube-api-access-9hvqt") pod "9a577401-9bfd-42a3-82f9-185a545b6d3b" (UID: "9a577401-9bfd-42a3-82f9-185a545b6d3b"). InnerVolumeSpecName "kube-api-access-9hvqt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.815860 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9a577401-9bfd-42a3-82f9-185a545b6d3b" (UID: "9a577401-9bfd-42a3-82f9-185a545b6d3b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.831277 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-inventory" (OuterVolumeSpecName: "inventory") pod "9a577401-9bfd-42a3-82f9-185a545b6d3b" (UID: "9a577401-9bfd-42a3-82f9-185a545b6d3b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.889160 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.889350 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9a577401-9bfd-42a3-82f9-185a545b6d3b-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 18:44:42 crc kubenswrapper[4926]: I1125 18:44:42.889361 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hvqt\" (UniqueName: \"kubernetes.io/projected/9a577401-9bfd-42a3-82f9-185a545b6d3b-kube-api-access-9hvqt\") on node \"crc\" DevicePath \"\""
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.180659 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k" event={"ID":"9a577401-9bfd-42a3-82f9-185a545b6d3b","Type":"ContainerDied","Data":"182b0fd6f2f98e9de469e598a12f74f5ccfc466e93a33c98b09337cb69d57232"}
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.180700 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="182b0fd6f2f98e9de469e598a12f74f5ccfc466e93a33c98b09337cb69d57232"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.180772 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.269231 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"]
Nov 25 18:44:43 crc kubenswrapper[4926]: E1125 18:44:43.269784 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a577401-9bfd-42a3-82f9-185a545b6d3b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.269806 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a577401-9bfd-42a3-82f9-185a545b6d3b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.270109 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a577401-9bfd-42a3-82f9-185a545b6d3b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.271046 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.273210 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.273506 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.274262 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.274466 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.292964 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"]
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.400497 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drfpd\" (UniqueName: \"kubernetes.io/projected/9f41d9b7-a000-45f2-8132-79eea20295bf-kube-api-access-drfpd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.400602 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.401596 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.503107 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.503167 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drfpd\" (UniqueName: \"kubernetes.io/projected/9f41d9b7-a000-45f2-8132-79eea20295bf-kube-api-access-drfpd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.503212 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.509346 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.509363 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.528905 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drfpd\" (UniqueName: \"kubernetes.io/projected/9f41d9b7-a000-45f2-8132-79eea20295bf-kube-api-access-drfpd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vm4lt\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:43 crc kubenswrapper[4926]: I1125 18:44:43.591578 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:44:44 crc kubenswrapper[4926]: I1125 18:44:44.200286 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"]
Nov 25 18:44:45 crc kubenswrapper[4926]: I1125 18:44:45.212358 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt" event={"ID":"9f41d9b7-a000-45f2-8132-79eea20295bf","Type":"ContainerStarted","Data":"17071b2a69b0cebd080de4ad7b8084eda358f36e30d07c2d7b754fb08433a30e"}
Nov 25 18:44:45 crc kubenswrapper[4926]: I1125 18:44:45.212808 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt" event={"ID":"9f41d9b7-a000-45f2-8132-79eea20295bf","Type":"ContainerStarted","Data":"ce60918605b47b83c265770e4c18f8ff9776e67e2ab0064ef6dbf33eddaeb1f3"}
Nov 25 18:44:45 crc kubenswrapper[4926]: I1125 18:44:45.238086 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt" podStartSLOduration=1.688791978 podStartE2EDuration="2.23806249s" podCreationTimestamp="2025-11-25 18:44:43 +0000 UTC" firstStartedPulling="2025-11-25 18:44:44.202937642 +0000 UTC m=+1914.588451247" lastFinishedPulling="2025-11-25 18:44:44.752208154 +0000 UTC m=+1915.137721759" observedRunningTime="2025-11-25 18:44:45.233444086 +0000 UTC m=+1915.618957731" watchObservedRunningTime="2025-11-25 18:44:45.23806249 +0000 UTC m=+1915.623576105"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.053496 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-f2gmd"]
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.064038 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-f2gmd"]
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.141005 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"]
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.142677 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.154417 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"]
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.177034 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.177157 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.185026 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-config-volume\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.185089 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8nsj\" (UniqueName: \"kubernetes.io/projected/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-kube-api-access-h8nsj\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.185121 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-secret-volume\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.286747 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8nsj\" (UniqueName: \"kubernetes.io/projected/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-kube-api-access-h8nsj\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.286800 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-secret-volume\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.287032 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-config-volume\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.288005 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-config-volume\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.293002 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-secret-volume\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.307893 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8nsj\" (UniqueName: \"kubernetes.io/projected/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-kube-api-access-h8nsj\") pod \"collect-profiles-29401605-mnsnh\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.340966 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="113b163c-c6d9-498d-9743-a53689445970" path="/var/lib/kubelet/pods/113b163c-c6d9-498d-9743-a53689445970/volumes"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.500315 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:00 crc kubenswrapper[4926]: I1125 18:45:00.985323 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"]
Nov 25 18:45:01 crc kubenswrapper[4926]: I1125 18:45:01.376308 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh" event={"ID":"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf","Type":"ContainerStarted","Data":"febd321d2687a4991200dbc663900495d77a00393c46000ae9ba8e5d734cd78b"}
Nov 25 18:45:01 crc kubenswrapper[4926]: I1125 18:45:01.376357 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh" event={"ID":"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf","Type":"ContainerStarted","Data":"ffe9f0e2fca9e36340962dbe8239b0e711770a35256f3725186b6a68abb9bd36"}
Nov 25 18:45:01 crc kubenswrapper[4926]: I1125 18:45:01.396743 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh" podStartSLOduration=1.396727995 podStartE2EDuration="1.396727995s" podCreationTimestamp="2025-11-25 18:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:45:01.394166095 +0000 UTC m=+1931.779679710" watchObservedRunningTime="2025-11-25 18:45:01.396727995 +0000 UTC m=+1931.782241590"
Nov 25 18:45:02 crc kubenswrapper[4926]: I1125 18:45:02.387114 4926 generic.go:334] "Generic (PLEG): container finished" podID="f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" containerID="febd321d2687a4991200dbc663900495d77a00393c46000ae9ba8e5d734cd78b" exitCode=0
Nov 25 18:45:02 crc kubenswrapper[4926]: I1125 18:45:02.387166 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh" event={"ID":"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf","Type":"ContainerDied","Data":"febd321d2687a4991200dbc663900495d77a00393c46000ae9ba8e5d734cd78b"}
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.740728 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.861999 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-secret-volume\") pod \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") "
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.862050 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-config-volume\") pod \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") "
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.862168 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8nsj\" (UniqueName: \"kubernetes.io/projected/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-kube-api-access-h8nsj\") pod \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\" (UID: \"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf\") "
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.863879 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-config-volume" (OuterVolumeSpecName: "config-volume") pod "f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" (UID: "f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.869751 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" (UID: "f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.873626 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-kube-api-access-h8nsj" (OuterVolumeSpecName: "kube-api-access-h8nsj") pod "f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" (UID: "f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf"). InnerVolumeSpecName "kube-api-access-h8nsj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.964606 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.964650 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 18:45:03 crc kubenswrapper[4926]: I1125 18:45:03.964664 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8nsj\" (UniqueName: \"kubernetes.io/projected/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf-kube-api-access-h8nsj\") on node \"crc\" DevicePath \"\""
Nov 25 18:45:04 crc kubenswrapper[4926]: I1125 18:45:04.413330 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh" event={"ID":"f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf","Type":"ContainerDied","Data":"ffe9f0e2fca9e36340962dbe8239b0e711770a35256f3725186b6a68abb9bd36"}
Nov 25 18:45:04 crc kubenswrapper[4926]: I1125 18:45:04.413970 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffe9f0e2fca9e36340962dbe8239b0e711770a35256f3725186b6a68abb9bd36"
Nov 25 18:45:04 crc kubenswrapper[4926]: I1125 18:45:04.413598 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"
Nov 25 18:45:21 crc kubenswrapper[4926]: I1125 18:45:21.845287 4926 scope.go:117] "RemoveContainer" containerID="bf6cc4fa410ede7fbd50da255fa8af3773eebdb944008e53458ff7070075fbfe"
Nov 25 18:45:29 crc kubenswrapper[4926]: I1125 18:45:29.661534 4926 generic.go:334] "Generic (PLEG): container finished" podID="9f41d9b7-a000-45f2-8132-79eea20295bf" containerID="17071b2a69b0cebd080de4ad7b8084eda358f36e30d07c2d7b754fb08433a30e" exitCode=0
Nov 25 18:45:29 crc kubenswrapper[4926]: I1125 18:45:29.661652 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt" event={"ID":"9f41d9b7-a000-45f2-8132-79eea20295bf","Type":"ContainerDied","Data":"17071b2a69b0cebd080de4ad7b8084eda358f36e30d07c2d7b754fb08433a30e"}
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.285213 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.385577 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-inventory\") pod \"9f41d9b7-a000-45f2-8132-79eea20295bf\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") "
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.385858 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-ssh-key\") pod \"9f41d9b7-a000-45f2-8132-79eea20295bf\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") "
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.386062 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drfpd\" (UniqueName: \"kubernetes.io/projected/9f41d9b7-a000-45f2-8132-79eea20295bf-kube-api-access-drfpd\") pod \"9f41d9b7-a000-45f2-8132-79eea20295bf\" (UID: \"9f41d9b7-a000-45f2-8132-79eea20295bf\") "
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.395613 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f41d9b7-a000-45f2-8132-79eea20295bf-kube-api-access-drfpd" (OuterVolumeSpecName: "kube-api-access-drfpd") pod "9f41d9b7-a000-45f2-8132-79eea20295bf" (UID: "9f41d9b7-a000-45f2-8132-79eea20295bf"). InnerVolumeSpecName "kube-api-access-drfpd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.437779 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9f41d9b7-a000-45f2-8132-79eea20295bf" (UID: "9f41d9b7-a000-45f2-8132-79eea20295bf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.438846 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-inventory" (OuterVolumeSpecName: "inventory") pod "9f41d9b7-a000-45f2-8132-79eea20295bf" (UID: "9f41d9b7-a000-45f2-8132-79eea20295bf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.491488 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drfpd\" (UniqueName: \"kubernetes.io/projected/9f41d9b7-a000-45f2-8132-79eea20295bf-kube-api-access-drfpd\") on node \"crc\" DevicePath \"\""
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.491540 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.491559 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9f41d9b7-a000-45f2-8132-79eea20295bf-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.684571 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt" event={"ID":"9f41d9b7-a000-45f2-8132-79eea20295bf","Type":"ContainerDied","Data":"ce60918605b47b83c265770e4c18f8ff9776e67e2ab0064ef6dbf33eddaeb1f3"}
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.685396 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce60918605b47b83c265770e4c18f8ff9776e67e2ab0064ef6dbf33eddaeb1f3"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.684639 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vm4lt"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.789711 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"]
Nov 25 18:45:31 crc kubenswrapper[4926]: E1125 18:45:31.790181 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" containerName="collect-profiles"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.790201 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" containerName="collect-profiles"
Nov 25 18:45:31 crc kubenswrapper[4926]: E1125 18:45:31.790249 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f41d9b7-a000-45f2-8132-79eea20295bf" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.790256 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f41d9b7-a000-45f2-8132-79eea20295bf" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.790468 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f41d9b7-a000-45f2-8132-79eea20295bf" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.790508 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" containerName="collect-profiles"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.791175 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.797568 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"]
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.845295 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.845622 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.846327 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.846586 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.901545 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.901954 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwkcs\" (UniqueName: \"kubernetes.io/projected/d2105a62-0fbe-4a45-94bf-81b59115a28b-kube-api-access-cwkcs\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:31 crc kubenswrapper[4926]: I1125 18:45:31.902216 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.005121 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.005198 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwkcs\" (UniqueName: \"kubernetes.io/projected/d2105a62-0fbe-4a45-94bf-81b59115a28b-kube-api-access-cwkcs\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.005270 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.011088 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.011423 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.020288 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwkcs\" (UniqueName: \"kubernetes.io/projected/d2105a62-0fbe-4a45-94bf-81b59115a28b-kube-api-access-cwkcs\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2l97d\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.163563 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Nov 25 18:45:32 crc kubenswrapper[4926]: I1125 18:45:32.821955 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"]
Nov 25 18:45:32 crc kubenswrapper[4926]: W1125 18:45:32.825824 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2105a62_0fbe_4a45_94bf_81b59115a28b.slice/crio-1fbac904da54173aa557663e2fd736cfc46140225468f9898c1c7c23de2a6934 WatchSource:0}: Error finding container 1fbac904da54173aa557663e2fd736cfc46140225468f9898c1c7c23de2a6934: Status 404 returned error can't find the container with id 1fbac904da54173aa557663e2fd736cfc46140225468f9898c1c7c23de2a6934
Nov 25 18:45:33 crc kubenswrapper[4926]: I1125 18:45:33.710637 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" event={"ID":"d2105a62-0fbe-4a45-94bf-81b59115a28b","Type":"ContainerStarted","Data":"d06a84f0a42f23cc3f8a3d9c7445138bc9bb85931c21e53164ee5c8e02844ff4"}
Nov 25 18:45:33 crc kubenswrapper[4926]: I1125 18:45:33.711233 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" event={"ID":"d2105a62-0fbe-4a45-94bf-81b59115a28b","Type":"ContainerStarted","Data":"1fbac904da54173aa557663e2fd736cfc46140225468f9898c1c7c23de2a6934"}
Nov 25 18:45:33 crc kubenswrapper[4926]: I1125 18:45:33.739052 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" podStartSLOduration=2.31350738 podStartE2EDuration="2.7390306s" podCreationTimestamp="2025-11-25 18:45:31 +0000 UTC" firstStartedPulling="2025-11-25 18:45:32.832356451 +0000 UTC m=+1963.217870096" lastFinishedPulling="2025-11-25 18:45:33.257879711 +0000 UTC m=+1963.643393316" observedRunningTime="2025-11-25 18:45:33.724459429 +0000 UTC m=+1964.109973044" watchObservedRunningTime="2025-11-25 18:45:33.7390306 +0000 UTC m=+1964.124544215"
Nov 25 18:46:03 crc kubenswrapper[4926]: I1125 18:46:03.541850 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:46:03 crc kubenswrapper[4926]: I1125 18:46:03.542941 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:46:33 crc kubenswrapper[4926]: I1125 18:46:33.541230 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 18:46:33 crc kubenswrapper[4926]: I1125 18:46:33.542309 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 18:46:35 crc kubenswrapper[4926]: I1125 18:46:35.420564 4926 generic.go:334] "Generic (PLEG): container finished" podID="d2105a62-0fbe-4a45-94bf-81b59115a28b" containerID="d06a84f0a42f23cc3f8a3d9c7445138bc9bb85931c21e53164ee5c8e02844ff4" exitCode=0
Nov 25 18:46:35 crc kubenswrapper[4926]: I1125 18:46:35.420620 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" event={"ID":"d2105a62-0fbe-4a45-94bf-81b59115a28b","Type":"ContainerDied","Data":"d06a84f0a42f23cc3f8a3d9c7445138bc9bb85931c21e53164ee5c8e02844ff4"}
Nov 25 18:46:36 crc kubenswrapper[4926]: I1125 18:46:36.997929 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d"
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.118472 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-ssh-key\") pod \"d2105a62-0fbe-4a45-94bf-81b59115a28b\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.118574 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwkcs\" (UniqueName: \"kubernetes.io/projected/d2105a62-0fbe-4a45-94bf-81b59115a28b-kube-api-access-cwkcs\") pod \"d2105a62-0fbe-4a45-94bf-81b59115a28b\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.118622 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-inventory\") pod \"d2105a62-0fbe-4a45-94bf-81b59115a28b\" (UID: \"d2105a62-0fbe-4a45-94bf-81b59115a28b\") " Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.132592 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2105a62-0fbe-4a45-94bf-81b59115a28b-kube-api-access-cwkcs" (OuterVolumeSpecName: "kube-api-access-cwkcs") pod "d2105a62-0fbe-4a45-94bf-81b59115a28b" (UID: "d2105a62-0fbe-4a45-94bf-81b59115a28b"). InnerVolumeSpecName "kube-api-access-cwkcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.167707 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-inventory" (OuterVolumeSpecName: "inventory") pod "d2105a62-0fbe-4a45-94bf-81b59115a28b" (UID: "d2105a62-0fbe-4a45-94bf-81b59115a28b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.171647 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d2105a62-0fbe-4a45-94bf-81b59115a28b" (UID: "d2105a62-0fbe-4a45-94bf-81b59115a28b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.221013 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.221059 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2105a62-0fbe-4a45-94bf-81b59115a28b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.221077 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwkcs\" (UniqueName: \"kubernetes.io/projected/d2105a62-0fbe-4a45-94bf-81b59115a28b-kube-api-access-cwkcs\") on node \"crc\" DevicePath \"\"" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.453587 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" event={"ID":"d2105a62-0fbe-4a45-94bf-81b59115a28b","Type":"ContainerDied","Data":"1fbac904da54173aa557663e2fd736cfc46140225468f9898c1c7c23de2a6934"} Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.453695 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fbac904da54173aa557663e2fd736cfc46140225468f9898c1c7c23de2a6934" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.453638 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2l97d" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.643872 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-757rs"] Nov 25 18:46:37 crc kubenswrapper[4926]: E1125 18:46:37.644700 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2105a62-0fbe-4a45-94bf-81b59115a28b" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.644746 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2105a62-0fbe-4a45-94bf-81b59115a28b" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.645225 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2105a62-0fbe-4a45-94bf-81b59115a28b" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.646694 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.655554 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.655555 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.656184 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.656403 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.659497 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-757rs"] Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.731778 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.731834 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.732043 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4grxx\" (UniqueName: \"kubernetes.io/projected/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-kube-api-access-4grxx\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.834364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4grxx\" (UniqueName: \"kubernetes.io/projected/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-kube-api-access-4grxx\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.834487 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.834531 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:37 crc 
Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.852948 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs"
Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.868466 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4grxx\" (UniqueName: \"kubernetes.io/projected/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-kube-api-access-4grxx\") pod \"ssh-known-hosts-edpm-deployment-757rs\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " pod="openstack/ssh-known-hosts-edpm-deployment-757rs"
Nov 25 18:46:37 crc kubenswrapper[4926]: I1125 18:46:37.980054 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-757rs"
Nov 25 18:46:38 crc kubenswrapper[4926]: I1125 18:46:38.561389 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-757rs"]
Nov 25 18:46:38 crc kubenswrapper[4926]: I1125 18:46:38.565431 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 18:46:39 crc kubenswrapper[4926]: I1125 18:46:39.481849 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" event={"ID":"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee","Type":"ContainerStarted","Data":"3ad2bb845d8c610927efb9d197662104a0c549a6e8a65a4cd6914318040d3643"}
Nov 25 18:46:39 crc kubenswrapper[4926]: I1125 18:46:39.482854 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" event={"ID":"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee","Type":"ContainerStarted","Data":"1f0bf75ad793d8afcf5f074fa39b8d61e9e10f13fc628e50f4f2a36338f6d0f0"}
Nov 25 18:46:39 crc kubenswrapper[4926]: I1125 18:46:39.510924 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" podStartSLOduration=2.025854527 podStartE2EDuration="2.510907529s" podCreationTimestamp="2025-11-25 18:46:37 +0000 UTC" firstStartedPulling="2025-11-25 18:46:38.56519496 +0000 UTC m=+2028.950708565" lastFinishedPulling="2025-11-25 18:46:39.050247922 +0000 UTC m=+2029.435761567" observedRunningTime="2025-11-25 18:46:39.505419635 +0000 UTC m=+2029.890933260" watchObservedRunningTime="2025-11-25 18:46:39.510907529 +0000 UTC m=+2029.896421134"
Nov 25 18:46:47 crc kubenswrapper[4926]: I1125 18:46:47.578633 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" containerID="3ad2bb845d8c610927efb9d197662104a0c549a6e8a65a4cd6914318040d3643" exitCode=0
Nov 25 18:46:47 crc kubenswrapper[4926]: I1125 18:46:47.578829 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" event={"ID":"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee","Type":"ContainerDied","Data":"3ad2bb845d8c610927efb9d197662104a0c549a6e8a65a4cd6914318040d3643"}
event={"ID":"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee","Type":"ContainerDied","Data":"3ad2bb845d8c610927efb9d197662104a0c549a6e8a65a4cd6914318040d3643"} Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.172438 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.334413 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-ssh-key-openstack-edpm-ipam\") pod \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.334618 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4grxx\" (UniqueName: \"kubernetes.io/projected/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-kube-api-access-4grxx\") pod \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.334678 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-inventory-0\") pod \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\" (UID: \"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee\") " Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.342752 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-kube-api-access-4grxx" (OuterVolumeSpecName: "kube-api-access-4grxx") pod "d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" (UID: "d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee"). InnerVolumeSpecName "kube-api-access-4grxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.363324 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" (UID: "d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.369827 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" (UID: "d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.440283 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4grxx\" (UniqueName: \"kubernetes.io/projected/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-kube-api-access-4grxx\") on node \"crc\" DevicePath \"\"" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.440329 4926 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.440348 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.602820 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" event={"ID":"d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee","Type":"ContainerDied","Data":"1f0bf75ad793d8afcf5f074fa39b8d61e9e10f13fc628e50f4f2a36338f6d0f0"} Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.603235 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f0bf75ad793d8afcf5f074fa39b8d61e9e10f13fc628e50f4f2a36338f6d0f0" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.602865 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-757rs" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.714915 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf"] Nov 25 18:46:49 crc kubenswrapper[4926]: E1125 18:46:49.715608 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" containerName="ssh-known-hosts-edpm-deployment" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.715641 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" containerName="ssh-known-hosts-edpm-deployment" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.715977 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee" containerName="ssh-known-hosts-edpm-deployment" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.717097 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.721853 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.721891 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.721953 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.722333 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.732662 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf"] Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.845941 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2gf6\" (UniqueName: \"kubernetes.io/projected/7e80b012-93ee-4623-b5fd-7d65987728a9-kube-api-access-p2gf6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.846015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.846177 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.948666 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2gf6\" (UniqueName: \"kubernetes.io/projected/7e80b012-93ee-4623-b5fd-7d65987728a9-kube-api-access-p2gf6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.948839 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.949185 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.961987 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.998064 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:49 crc kubenswrapper[4926]: I1125 18:46:49.999554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2gf6\" (UniqueName: \"kubernetes.io/projected/7e80b012-93ee-4623-b5fd-7d65987728a9-kube-api-access-p2gf6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-v2dkf\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:50 crc kubenswrapper[4926]: I1125 18:46:50.047978 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:46:50 crc kubenswrapper[4926]: I1125 18:46:50.611518 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf"] Nov 25 18:46:51 crc kubenswrapper[4926]: I1125 18:46:51.149266 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:46:51 crc kubenswrapper[4926]: I1125 18:46:51.622511 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" event={"ID":"7e80b012-93ee-4623-b5fd-7d65987728a9","Type":"ContainerStarted","Data":"883696ff15280548f70847297218399f2110a9f0faa2e336bb9d9660985356fa"} Nov 25 18:46:51 crc kubenswrapper[4926]: I1125 18:46:51.622824 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" event={"ID":"7e80b012-93ee-4623-b5fd-7d65987728a9","Type":"ContainerStarted","Data":"501803b1e8e2dcbb70e2cb7ece54e68d4b6b95d0b4a348e7f173e953dba1d7e5"} Nov 25 18:46:51 crc kubenswrapper[4926]: I1125 18:46:51.648954 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" podStartSLOduration=2.135766521 podStartE2EDuration="2.648919492s" podCreationTimestamp="2025-11-25 18:46:49 +0000 UTC" firstStartedPulling="2025-11-25 18:46:50.632945499 +0000 UTC m=+2041.018459104" lastFinishedPulling="2025-11-25 18:46:51.14609846 +0000 UTC m=+2041.531612075" observedRunningTime="2025-11-25 18:46:51.639363423 +0000 UTC m=+2042.024877088" watchObservedRunningTime="2025-11-25 18:46:51.648919492 +0000 UTC m=+2042.034433137" Nov 25 18:47:00 crc kubenswrapper[4926]: I1125 18:47:00.721284 4926 generic.go:334] "Generic (PLEG): container finished" podID="7e80b012-93ee-4623-b5fd-7d65987728a9" containerID="883696ff15280548f70847297218399f2110a9f0faa2e336bb9d9660985356fa" exitCode=0 Nov 25 18:47:00 crc kubenswrapper[4926]: I1125 18:47:00.721416 4926 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" event={"ID":"7e80b012-93ee-4623-b5fd-7d65987728a9","Type":"ContainerDied","Data":"883696ff15280548f70847297218399f2110a9f0faa2e336bb9d9660985356fa"} Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.346690 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.426574 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2gf6\" (UniqueName: \"kubernetes.io/projected/7e80b012-93ee-4623-b5fd-7d65987728a9-kube-api-access-p2gf6\") pod \"7e80b012-93ee-4623-b5fd-7d65987728a9\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.426858 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-inventory\") pod \"7e80b012-93ee-4623-b5fd-7d65987728a9\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.426975 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-ssh-key\") pod \"7e80b012-93ee-4623-b5fd-7d65987728a9\" (UID: \"7e80b012-93ee-4623-b5fd-7d65987728a9\") " Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.442294 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e80b012-93ee-4623-b5fd-7d65987728a9-kube-api-access-p2gf6" (OuterVolumeSpecName: "kube-api-access-p2gf6") pod "7e80b012-93ee-4623-b5fd-7d65987728a9" (UID: "7e80b012-93ee-4623-b5fd-7d65987728a9"). InnerVolumeSpecName "kube-api-access-p2gf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.478856 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7e80b012-93ee-4623-b5fd-7d65987728a9" (UID: "7e80b012-93ee-4623-b5fd-7d65987728a9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.487577 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-inventory" (OuterVolumeSpecName: "inventory") pod "7e80b012-93ee-4623-b5fd-7d65987728a9" (UID: "7e80b012-93ee-4623-b5fd-7d65987728a9"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.529514 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.529550 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2gf6\" (UniqueName: \"kubernetes.io/projected/7e80b012-93ee-4623-b5fd-7d65987728a9-kube-api-access-p2gf6\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.529567 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e80b012-93ee-4623-b5fd-7d65987728a9-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.754859 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" event={"ID":"7e80b012-93ee-4623-b5fd-7d65987728a9","Type":"ContainerDied","Data":"501803b1e8e2dcbb70e2cb7ece54e68d4b6b95d0b4a348e7f173e953dba1d7e5"} Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.755514 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="501803b1e8e2dcbb70e2cb7ece54e68d4b6b95d0b4a348e7f173e953dba1d7e5" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.755091 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-v2dkf" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.919296 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp"] Nov 25 18:47:02 crc kubenswrapper[4926]: E1125 18:47:02.920127 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e80b012-93ee-4623-b5fd-7d65987728a9" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.920160 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e80b012-93ee-4623-b5fd-7d65987728a9" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.920586 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e80b012-93ee-4623-b5fd-7d65987728a9" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.921925 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.926023 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.926126 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.926614 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.928260 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:47:02 crc kubenswrapper[4926]: I1125 18:47:02.936788 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp"] Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.040657 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2cdt\" (UniqueName: \"kubernetes.io/projected/316453c4-7c58-4d26-aaa1-9da97a22bcb6-kube-api-access-x2cdt\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.040711 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.040749 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.142422 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2cdt\" (UniqueName: \"kubernetes.io/projected/316453c4-7c58-4d26-aaa1-9da97a22bcb6-kube-api-access-x2cdt\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.142464 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.142490 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: 
\"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.147487 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.149688 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.162467 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2cdt\" (UniqueName: \"kubernetes.io/projected/316453c4-7c58-4d26-aaa1-9da97a22bcb6-kube-api-access-x2cdt\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.248078 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.540984 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.541467 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.541555 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.542513 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"382b7da6d0bccad494a8a3403e357e319f9afc1dcce63b01d2f6de71ba19c951"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.542622 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://382b7da6d0bccad494a8a3403e357e319f9afc1dcce63b01d2f6de71ba19c951" gracePeriod=600 Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.783657 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" 
containerID="382b7da6d0bccad494a8a3403e357e319f9afc1dcce63b01d2f6de71ba19c951" exitCode=0 Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.783705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"382b7da6d0bccad494a8a3403e357e319f9afc1dcce63b01d2f6de71ba19c951"} Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.783741 4926 scope.go:117] "RemoveContainer" containerID="a40029e68e731553d05e1710e289dd0752e684b359c47a3d7e27e86ce35877c2" Nov 25 18:47:03 crc kubenswrapper[4926]: I1125 18:47:03.934764 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp"] Nov 25 18:47:03 crc kubenswrapper[4926]: W1125 18:47:03.934769 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod316453c4_7c58_4d26_aaa1_9da97a22bcb6.slice/crio-8d3177f574835573f342515ece47992ce880d1e8233d624b6931cb66db021e4a WatchSource:0}: Error finding container 8d3177f574835573f342515ece47992ce880d1e8233d624b6931cb66db021e4a: Status 404 returned error can't find the container with id 8d3177f574835573f342515ece47992ce880d1e8233d624b6931cb66db021e4a Nov 25 18:47:04 crc kubenswrapper[4926]: I1125 18:47:04.796631 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee"} Nov 25 18:47:04 crc kubenswrapper[4926]: I1125 18:47:04.798849 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" event={"ID":"316453c4-7c58-4d26-aaa1-9da97a22bcb6","Type":"ContainerStarted","Data":"dfa76de4b7e81bb8b1f454b79019b20d9e2d7bf4e0de3f9f816dbd9746f3f8eb"} Nov 25 18:47:04 crc kubenswrapper[4926]: I1125 18:47:04.798875 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" event={"ID":"316453c4-7c58-4d26-aaa1-9da97a22bcb6","Type":"ContainerStarted","Data":"8d3177f574835573f342515ece47992ce880d1e8233d624b6931cb66db021e4a"} Nov 25 18:47:04 crc kubenswrapper[4926]: I1125 18:47:04.852735 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" podStartSLOduration=2.338371492 podStartE2EDuration="2.852702985s" podCreationTimestamp="2025-11-25 18:47:02 +0000 UTC" firstStartedPulling="2025-11-25 18:47:03.938545563 +0000 UTC m=+2054.324059208" lastFinishedPulling="2025-11-25 18:47:04.452877096 +0000 UTC m=+2054.838390701" observedRunningTime="2025-11-25 18:47:04.84433856 +0000 UTC m=+2055.229852205" watchObservedRunningTime="2025-11-25 18:47:04.852702985 +0000 UTC m=+2055.238216630" Nov 25 18:47:16 crc kubenswrapper[4926]: I1125 18:47:16.953007 4926 generic.go:334] "Generic (PLEG): container finished" podID="316453c4-7c58-4d26-aaa1-9da97a22bcb6" containerID="dfa76de4b7e81bb8b1f454b79019b20d9e2d7bf4e0de3f9f816dbd9746f3f8eb" exitCode=0 Nov 25 18:47:16 crc kubenswrapper[4926]: I1125 18:47:16.953076 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" 
event={"ID":"316453c4-7c58-4d26-aaa1-9da97a22bcb6","Type":"ContainerDied","Data":"dfa76de4b7e81bb8b1f454b79019b20d9e2d7bf4e0de3f9f816dbd9746f3f8eb"} Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.420357 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.517480 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-ssh-key\") pod \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.517569 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-inventory\") pod \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.517729 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2cdt\" (UniqueName: \"kubernetes.io/projected/316453c4-7c58-4d26-aaa1-9da97a22bcb6-kube-api-access-x2cdt\") pod \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\" (UID: \"316453c4-7c58-4d26-aaa1-9da97a22bcb6\") " Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.535717 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/316453c4-7c58-4d26-aaa1-9da97a22bcb6-kube-api-access-x2cdt" (OuterVolumeSpecName: "kube-api-access-x2cdt") pod "316453c4-7c58-4d26-aaa1-9da97a22bcb6" (UID: "316453c4-7c58-4d26-aaa1-9da97a22bcb6"). InnerVolumeSpecName "kube-api-access-x2cdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.550024 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "316453c4-7c58-4d26-aaa1-9da97a22bcb6" (UID: "316453c4-7c58-4d26-aaa1-9da97a22bcb6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.557237 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-inventory" (OuterVolumeSpecName: "inventory") pod "316453c4-7c58-4d26-aaa1-9da97a22bcb6" (UID: "316453c4-7c58-4d26-aaa1-9da97a22bcb6"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.619956 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.619989 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/316453c4-7c58-4d26-aaa1-9da97a22bcb6-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.620001 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2cdt\" (UniqueName: \"kubernetes.io/projected/316453c4-7c58-4d26-aaa1-9da97a22bcb6-kube-api-access-x2cdt\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.985320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" event={"ID":"316453c4-7c58-4d26-aaa1-9da97a22bcb6","Type":"ContainerDied","Data":"8d3177f574835573f342515ece47992ce880d1e8233d624b6931cb66db021e4a"} Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.985423 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp" Nov 25 18:47:18 crc kubenswrapper[4926]: I1125 18:47:18.985430 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d3177f574835573f342515ece47992ce880d1e8233d624b6931cb66db021e4a" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.139243 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx"] Nov 25 18:47:19 crc kubenswrapper[4926]: E1125 18:47:19.140165 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="316453c4-7c58-4d26-aaa1-9da97a22bcb6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.140185 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="316453c4-7c58-4d26-aaa1-9da97a22bcb6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.140496 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="316453c4-7c58-4d26-aaa1-9da97a22bcb6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.141301 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.147386 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.147427 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.147616 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.147855 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.147952 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.148283 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.148467 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.159088 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l9h7r"] Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.161604 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.162612 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.169312 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx"] Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.178454 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l9h7r"] Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232320 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-469th\" (UniqueName: \"kubernetes.io/projected/291f8668-9893-4eae-82bf-1111fc455f06-kube-api-access-469th\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232391 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232442 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232485 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232536 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232564 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232589 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232610 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232638 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232686 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-utilities\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " 
pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232709 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232735 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232775 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232838 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232886 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-catalog-content\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.232960 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5296j\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-kube-api-access-5296j\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335330 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335419 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335494 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335521 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335563 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335588 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335616 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335664 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-utilities\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: 
I1125 18:47:19.335686 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335709 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335754 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335786 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335819 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335864 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-catalog-content\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335942 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5296j\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-kube-api-access-5296j\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335967 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-469th\" (UniqueName: \"kubernetes.io/projected/291f8668-9893-4eae-82bf-1111fc455f06-kube-api-access-469th\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " 
pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.335994 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.336343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-utilities\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.336648 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-catalog-content\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.344625 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.344646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.345352 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.346032 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.346052 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: 
\"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.346840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.346960 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.347070 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.348766 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.350664 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.350842 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.351345 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.351636 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5296j\" (UniqueName: 
\"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-kube-api-access-5296j\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.353335 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-469th\" (UniqueName: \"kubernetes.io/projected/291f8668-9893-4eae-82bf-1111fc455f06-kube-api-access-469th\") pod \"community-operators-l9h7r\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.354851 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.464474 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:47:19 crc kubenswrapper[4926]: I1125 18:47:19.499128 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:20 crc kubenswrapper[4926]: I1125 18:47:20.074620 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx"] Nov 25 18:47:20 crc kubenswrapper[4926]: I1125 18:47:20.085442 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l9h7r"] Nov 25 18:47:20 crc kubenswrapper[4926]: W1125 18:47:20.099808 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod291f8668_9893_4eae_82bf_1111fc455f06.slice/crio-0dff243159e31d8330b0c9dc58c4f33d1b3a3bb6a8f05ae9d21b5857781d8c3d WatchSource:0}: Error finding container 0dff243159e31d8330b0c9dc58c4f33d1b3a3bb6a8f05ae9d21b5857781d8c3d: Status 404 returned error can't find the container with id 0dff243159e31d8330b0c9dc58c4f33d1b3a3bb6a8f05ae9d21b5857781d8c3d Nov 25 18:47:21 crc kubenswrapper[4926]: I1125 18:47:21.008309 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" event={"ID":"b2103eff-585d-4c8b-a486-930c76be6884","Type":"ContainerStarted","Data":"1707c505cd14981d35acc75e272c0e4f5edda4991e9a1b13a54dd632a6b1e160"} Nov 25 18:47:21 crc kubenswrapper[4926]: I1125 18:47:21.008622 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" event={"ID":"b2103eff-585d-4c8b-a486-930c76be6884","Type":"ContainerStarted","Data":"3efa7d560e5d676d788c677c8224bbea176afdc8e79f2df68ff7f8fdfbc74a64"} Nov 25 18:47:21 crc kubenswrapper[4926]: I1125 18:47:21.025314 4926 generic.go:334] "Generic (PLEG): container finished" podID="291f8668-9893-4eae-82bf-1111fc455f06" containerID="bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62" exitCode=0 Nov 25 18:47:21 crc kubenswrapper[4926]: I1125 18:47:21.025391 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerDied","Data":"bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62"} Nov 25 18:47:21 crc kubenswrapper[4926]: I1125 18:47:21.025445 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerStarted","Data":"0dff243159e31d8330b0c9dc58c4f33d1b3a3bb6a8f05ae9d21b5857781d8c3d"} Nov 25 18:47:21 crc kubenswrapper[4926]: I1125 18:47:21.034071 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" podStartSLOduration=1.480881617 podStartE2EDuration="2.034047621s" podCreationTimestamp="2025-11-25 18:47:19 +0000 UTC" firstStartedPulling="2025-11-25 18:47:20.075771619 +0000 UTC m=+2070.461285254" lastFinishedPulling="2025-11-25 18:47:20.628937653 +0000 UTC m=+2071.014451258" observedRunningTime="2025-11-25 18:47:21.028249228 +0000 UTC m=+2071.413762873" watchObservedRunningTime="2025-11-25 18:47:21.034047621 +0000 UTC m=+2071.419561236" Nov 25 18:47:22 crc kubenswrapper[4926]: I1125 18:47:22.044083 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerStarted","Data":"fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c"} Nov 25 18:47:23 crc kubenswrapper[4926]: I1125 18:47:23.058857 4926 generic.go:334] "Generic (PLEG): container finished" podID="291f8668-9893-4eae-82bf-1111fc455f06" containerID="fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c" exitCode=0 Nov 25 18:47:23 crc kubenswrapper[4926]: I1125 18:47:23.058941 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerDied","Data":"fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c"} Nov 25 18:47:24 crc kubenswrapper[4926]: I1125 18:47:24.070411 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerStarted","Data":"12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9"} Nov 25 18:47:24 crc kubenswrapper[4926]: I1125 18:47:24.108947 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l9h7r" podStartSLOduration=2.631299464 podStartE2EDuration="5.108914885s" podCreationTimestamp="2025-11-25 18:47:19 +0000 UTC" firstStartedPulling="2025-11-25 18:47:21.027426435 +0000 UTC m=+2071.412940040" lastFinishedPulling="2025-11-25 18:47:23.505041866 +0000 UTC m=+2073.890555461" observedRunningTime="2025-11-25 18:47:24.093345037 +0000 UTC m=+2074.478858653" watchObservedRunningTime="2025-11-25 18:47:24.108914885 +0000 UTC m=+2074.494428490" Nov 25 18:47:29 crc kubenswrapper[4926]: I1125 18:47:29.500913 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:29 crc kubenswrapper[4926]: I1125 18:47:29.501392 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:29 crc kubenswrapper[4926]: I1125 18:47:29.565357 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:30 crc kubenswrapper[4926]: I1125 18:47:30.196601 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:30 crc kubenswrapper[4926]: I1125 18:47:30.243146 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l9h7r"] Nov 25 18:47:32 crc kubenswrapper[4926]: I1125 18:47:32.927436 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l9h7r" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="registry-server" containerID="cri-o://12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9" gracePeriod=2 Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.412485 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.526150 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-catalog-content\") pod \"291f8668-9893-4eae-82bf-1111fc455f06\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.526331 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-utilities\") pod \"291f8668-9893-4eae-82bf-1111fc455f06\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.526451 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-469th\" (UniqueName: \"kubernetes.io/projected/291f8668-9893-4eae-82bf-1111fc455f06-kube-api-access-469th\") pod \"291f8668-9893-4eae-82bf-1111fc455f06\" (UID: \"291f8668-9893-4eae-82bf-1111fc455f06\") " Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.528215 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-utilities" (OuterVolumeSpecName: "utilities") pod "291f8668-9893-4eae-82bf-1111fc455f06" (UID: "291f8668-9893-4eae-82bf-1111fc455f06"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.534776 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/291f8668-9893-4eae-82bf-1111fc455f06-kube-api-access-469th" (OuterVolumeSpecName: "kube-api-access-469th") pod "291f8668-9893-4eae-82bf-1111fc455f06" (UID: "291f8668-9893-4eae-82bf-1111fc455f06"). InnerVolumeSpecName "kube-api-access-469th". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.587019 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "291f8668-9893-4eae-82bf-1111fc455f06" (UID: "291f8668-9893-4eae-82bf-1111fc455f06"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.628742 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.628820 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-469th\" (UniqueName: \"kubernetes.io/projected/291f8668-9893-4eae-82bf-1111fc455f06-kube-api-access-469th\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.628838 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/291f8668-9893-4eae-82bf-1111fc455f06-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.940209 4926 generic.go:334] "Generic (PLEG): container finished" podID="291f8668-9893-4eae-82bf-1111fc455f06" containerID="12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9" exitCode=0 Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.940257 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerDied","Data":"12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9"} Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.940299 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9h7r" event={"ID":"291f8668-9893-4eae-82bf-1111fc455f06","Type":"ContainerDied","Data":"0dff243159e31d8330b0c9dc58c4f33d1b3a3bb6a8f05ae9d21b5857781d8c3d"} Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.940323 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l9h7r" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.940327 4926 scope.go:117] "RemoveContainer" containerID="12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.970309 4926 scope.go:117] "RemoveContainer" containerID="fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c" Nov 25 18:47:33 crc kubenswrapper[4926]: I1125 18:47:33.997561 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l9h7r"] Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.014358 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l9h7r"] Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.018674 4926 scope.go:117] "RemoveContainer" containerID="bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.076700 4926 scope.go:117] "RemoveContainer" containerID="12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9" Nov 25 18:47:34 crc kubenswrapper[4926]: E1125 18:47:34.077113 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9\": container with ID starting with 12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9 not found: ID does not exist" containerID="12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.077158 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9"} err="failed to get container status \"12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9\": rpc error: code = NotFound desc = could not find container \"12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9\": container with ID starting with 12648e4b52bb711cad6d457cbb749d1418c137c38e8e262766b051b71b9d6ee9 not found: ID does not exist" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.077185 4926 scope.go:117] "RemoveContainer" containerID="fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c" Nov 25 18:47:34 crc kubenswrapper[4926]: E1125 18:47:34.077500 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c\": container with ID starting with fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c not found: ID does not exist" containerID="fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.077535 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c"} err="failed to get container status \"fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c\": rpc error: code = NotFound desc = could not find container \"fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c\": container with ID starting with fb95a72de7d836737b362d59bc18108eb391e23c9a23e6b26db5924471061e3c not found: ID does not exist" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.077555 4926 scope.go:117] "RemoveContainer" 
containerID="bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62" Nov 25 18:47:34 crc kubenswrapper[4926]: E1125 18:47:34.077910 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62\": container with ID starting with bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62 not found: ID does not exist" containerID="bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.078025 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62"} err="failed to get container status \"bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62\": rpc error: code = NotFound desc = could not find container \"bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62\": container with ID starting with bda0fdc8585f10c8b624d4253b8eae390fec1953489d20cb3c625776fc4a6e62 not found: ID does not exist" Nov 25 18:47:34 crc kubenswrapper[4926]: I1125 18:47:34.346277 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="291f8668-9893-4eae-82bf-1111fc455f06" path="/var/lib/kubelet/pods/291f8668-9893-4eae-82bf-1111fc455f06/volumes" Nov 25 18:48:07 crc kubenswrapper[4926]: I1125 18:48:07.285707 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2103eff-585d-4c8b-a486-930c76be6884" containerID="1707c505cd14981d35acc75e272c0e4f5edda4991e9a1b13a54dd632a6b1e160" exitCode=0 Nov 25 18:48:07 crc kubenswrapper[4926]: I1125 18:48:07.285816 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" event={"ID":"b2103eff-585d-4c8b-a486-930c76be6884","Type":"ContainerDied","Data":"1707c505cd14981d35acc75e272c0e4f5edda4991e9a1b13a54dd632a6b1e160"} Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.748486 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.892830 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893095 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-ovn-default-certs-0\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893190 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-telemetry-combined-ca-bundle\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893216 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5296j\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-kube-api-access-5296j\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893234 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-bootstrap-combined-ca-bundle\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893272 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893305 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-nova-combined-ca-bundle\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893322 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-neutron-metadata-combined-ca-bundle\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893366 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-repo-setup-combined-ca-bundle\") pod 
\"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893397 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-inventory\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893425 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893445 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ssh-key\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ovn-combined-ca-bundle\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.893641 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-libvirt-combined-ca-bundle\") pod \"b2103eff-585d-4c8b-a486-930c76be6884\" (UID: \"b2103eff-585d-4c8b-a486-930c76be6884\") " Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.900202 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.900930 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.901739 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-kube-api-access-5296j" (OuterVolumeSpecName: "kube-api-access-5296j") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "kube-api-access-5296j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.901941 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.902319 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.902653 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.903543 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.903936 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.904666 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.905615 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.905651 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.906429 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.931858 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-inventory" (OuterVolumeSpecName: "inventory") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.939722 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b2103eff-585d-4c8b-a486-930c76be6884" (UID: "b2103eff-585d-4c8b-a486-930c76be6884"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.995849 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996274 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996424 4926 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996545 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5296j\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-kube-api-access-5296j\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996639 4926 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996729 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996811 4926 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996901 4926 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.996989 4926 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.997070 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.997178 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/b2103eff-585d-4c8b-a486-930c76be6884-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.997263 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.997355 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:08 crc kubenswrapper[4926]: I1125 18:48:08.997472 4926 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2103eff-585d-4c8b-a486-930c76be6884-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.322881 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" event={"ID":"b2103eff-585d-4c8b-a486-930c76be6884","Type":"ContainerDied","Data":"3efa7d560e5d676d788c677c8224bbea176afdc8e79f2df68ff7f8fdfbc74a64"} Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.322922 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3efa7d560e5d676d788c677c8224bbea176afdc8e79f2df68ff7f8fdfbc74a64" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.322971 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.456273 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6"] Nov 25 18:48:09 crc kubenswrapper[4926]: E1125 18:48:09.456761 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="extract-utilities" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.456773 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="extract-utilities" Nov 25 18:48:09 crc kubenswrapper[4926]: E1125 18:48:09.456792 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="registry-server" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.456799 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="registry-server" Nov 25 18:48:09 crc kubenswrapper[4926]: E1125 18:48:09.456825 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2103eff-585d-4c8b-a486-930c76be6884" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.456833 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2103eff-585d-4c8b-a486-930c76be6884" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 18:48:09 crc kubenswrapper[4926]: E1125 18:48:09.456840 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="extract-content" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.456846 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="extract-content" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.457049 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2103eff-585d-4c8b-a486-930c76be6884" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 18:48:09 crc 
kubenswrapper[4926]: I1125 18:48:09.457064 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="291f8668-9893-4eae-82bf-1111fc455f06" containerName="registry-server" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.457729 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.461066 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.461236 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.462716 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.463408 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.466594 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6"] Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.467195 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.609175 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.609216 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.609323 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvh62\" (UniqueName: \"kubernetes.io/projected/c91c3c0b-35d3-402e-a758-672676d30d1d-kube-api-access-gvh62\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.609403 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.609575 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c91c3c0b-35d3-402e-a758-672676d30d1d-ovncontroller-config-0\") pod 
\"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.711687 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c91c3c0b-35d3-402e-a758-672676d30d1d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.711749 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.711771 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.711843 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvh62\" (UniqueName: \"kubernetes.io/projected/c91c3c0b-35d3-402e-a758-672676d30d1d-kube-api-access-gvh62\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.711867 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.712927 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c91c3c0b-35d3-402e-a758-672676d30d1d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.717286 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.722716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 
Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.728202 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6"
Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.743636 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvh62\" (UniqueName: \"kubernetes.io/projected/c91c3c0b-35d3-402e-a758-672676d30d1d-kube-api-access-gvh62\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-mn7s6\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6"
Nov 25 18:48:09 crc kubenswrapper[4926]: I1125 18:48:09.791000 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6"
Nov 25 18:48:10 crc kubenswrapper[4926]: I1125 18:48:10.416141 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6"]
Nov 25 18:48:11 crc kubenswrapper[4926]: I1125 18:48:11.347413 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" event={"ID":"c91c3c0b-35d3-402e-a758-672676d30d1d","Type":"ContainerStarted","Data":"197e7ae6418d2f6b6c604c228d9f839a0f2141d7ea256fe9dca65939dc83994f"}
Nov 25 18:48:11 crc kubenswrapper[4926]: I1125 18:48:11.347718 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" event={"ID":"c91c3c0b-35d3-402e-a758-672676d30d1d","Type":"ContainerStarted","Data":"d32aa5c5f822a1c04acee50a32ba3eebb2365b8624544bf9c0c7759cbd7baf47"}
Nov 25 18:48:11 crc kubenswrapper[4926]: I1125 18:48:11.375225 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" podStartSLOduration=1.9863876660000002 podStartE2EDuration="2.375205895s" podCreationTimestamp="2025-11-25 18:48:09 +0000 UTC" firstStartedPulling="2025-11-25 18:48:10.42109097 +0000 UTC m=+2120.806604575" lastFinishedPulling="2025-11-25 18:48:10.809909199 +0000 UTC m=+2121.195422804" observedRunningTime="2025-11-25 18:48:11.370858203 +0000 UTC m=+2121.756371818" watchObservedRunningTime="2025-11-25 18:48:11.375205895 +0000 UTC m=+2121.760719510"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.519138 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zl65q"]
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.522054 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.539142 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zl65q"]
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.613145 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-477qt\" (UniqueName: \"kubernetes.io/projected/270b1b63-7aa1-42d0-b14b-295d1e817768-kube-api-access-477qt\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.613262 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-catalog-content\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.613285 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-utilities\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.714897 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-477qt\" (UniqueName: \"kubernetes.io/projected/270b1b63-7aa1-42d0-b14b-295d1e817768-kube-api-access-477qt\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.715035 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-catalog-content\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.715067 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-utilities\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.715653 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-catalog-content\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.715683 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-utilities\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.737153 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-477qt\" (UniqueName: \"kubernetes.io/projected/270b1b63-7aa1-42d0-b14b-295d1e817768-kube-api-access-477qt\") pod \"redhat-marketplace-zl65q\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:30 crc kubenswrapper[4926]: I1125 18:48:30.851644 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zl65q"
Nov 25 18:48:31 crc kubenswrapper[4926]: I1125 18:48:31.370395 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zl65q"]
Nov 25 18:48:31 crc kubenswrapper[4926]: I1125 18:48:31.570215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerStarted","Data":"0ec319611a3fb49b0dc391d6cabc3b8ca57c016c9608e23a02ab1a8709b4a493"}
Nov 25 18:48:32 crc kubenswrapper[4926]: I1125 18:48:32.586785 4926 generic.go:334] "Generic (PLEG): container finished" podID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerID="24f20ec70d43c301c4c05c0bb2ac74bf52247e40396d26b549521af6a194b2fc" exitCode=0
Nov 25 18:48:32 crc kubenswrapper[4926]: I1125 18:48:32.586876 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerDied","Data":"24f20ec70d43c301c4c05c0bb2ac74bf52247e40396d26b549521af6a194b2fc"}
Nov 25 18:48:33 crc kubenswrapper[4926]: I1125 18:48:33.598222 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerStarted","Data":"788ac9935c2eae0a479eea3550d0db2d03f8757bce2125d97b503968845796fc"}
Nov 25 18:48:34 crc kubenswrapper[4926]: I1125 18:48:34.611502 4926 generic.go:334] "Generic (PLEG): container finished" podID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerID="788ac9935c2eae0a479eea3550d0db2d03f8757bce2125d97b503968845796fc" exitCode=0
Nov 25 18:48:34 crc kubenswrapper[4926]: I1125 18:48:34.611605 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerDied","Data":"788ac9935c2eae0a479eea3550d0db2d03f8757bce2125d97b503968845796fc"}
Nov 25 18:48:35 crc kubenswrapper[4926]: I1125 18:48:35.627616 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerStarted","Data":"11b5eeace705850ebff134dc36dfd7d69a55f13afbf21e9f8d519d596dec4f11"}
Nov 25 18:48:35 crc kubenswrapper[4926]: I1125 18:48:35.660678 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zl65q" podStartSLOduration=3.223091752 podStartE2EDuration="5.660639735s" podCreationTimestamp="2025-11-25 18:48:30 +0000 UTC" firstStartedPulling="2025-11-25 18:48:32.590795617 +0000 UTC m=+2142.976309252" lastFinishedPulling="2025-11-25 18:48:35.02834358 +0000 UTC m=+2145.413857235" observedRunningTime="2025-11-25 18:48:35.647938485 +0000 UTC m=+2146.033452170" watchObservedRunningTime="2025-11-25 18:48:35.660639735 +0000 UTC m=+2146.046153390"
pod="openshift-marketplace/redhat-marketplace-zl65q" Nov 25 18:48:40 crc kubenswrapper[4926]: I1125 18:48:40.852709 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zl65q" Nov 25 18:48:40 crc kubenswrapper[4926]: I1125 18:48:40.906468 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zl65q" Nov 25 18:48:41 crc kubenswrapper[4926]: I1125 18:48:41.744641 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zl65q" Nov 25 18:48:41 crc kubenswrapper[4926]: I1125 18:48:41.808605 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zl65q"] Nov 25 18:48:43 crc kubenswrapper[4926]: I1125 18:48:43.715309 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zl65q" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="registry-server" containerID="cri-o://11b5eeace705850ebff134dc36dfd7d69a55f13afbf21e9f8d519d596dec4f11" gracePeriod=2 Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.724412 4926 generic.go:334] "Generic (PLEG): container finished" podID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerID="11b5eeace705850ebff134dc36dfd7d69a55f13afbf21e9f8d519d596dec4f11" exitCode=0 Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.724481 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerDied","Data":"11b5eeace705850ebff134dc36dfd7d69a55f13afbf21e9f8d519d596dec4f11"} Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.724790 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zl65q" event={"ID":"270b1b63-7aa1-42d0-b14b-295d1e817768","Type":"ContainerDied","Data":"0ec319611a3fb49b0dc391d6cabc3b8ca57c016c9608e23a02ab1a8709b4a493"} Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.724802 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ec319611a3fb49b0dc391d6cabc3b8ca57c016c9608e23a02ab1a8709b4a493" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.770719 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zl65q" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.835802 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-catalog-content\") pod \"270b1b63-7aa1-42d0-b14b-295d1e817768\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.835976 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-477qt\" (UniqueName: \"kubernetes.io/projected/270b1b63-7aa1-42d0-b14b-295d1e817768-kube-api-access-477qt\") pod \"270b1b63-7aa1-42d0-b14b-295d1e817768\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.836060 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-utilities\") pod \"270b1b63-7aa1-42d0-b14b-295d1e817768\" (UID: \"270b1b63-7aa1-42d0-b14b-295d1e817768\") " Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.837192 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-utilities" (OuterVolumeSpecName: "utilities") pod "270b1b63-7aa1-42d0-b14b-295d1e817768" (UID: "270b1b63-7aa1-42d0-b14b-295d1e817768"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.844647 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/270b1b63-7aa1-42d0-b14b-295d1e817768-kube-api-access-477qt" (OuterVolumeSpecName: "kube-api-access-477qt") pod "270b1b63-7aa1-42d0-b14b-295d1e817768" (UID: "270b1b63-7aa1-42d0-b14b-295d1e817768"). InnerVolumeSpecName "kube-api-access-477qt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.852935 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "270b1b63-7aa1-42d0-b14b-295d1e817768" (UID: "270b1b63-7aa1-42d0-b14b-295d1e817768"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.938774 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.938809 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270b1b63-7aa1-42d0-b14b-295d1e817768-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:44 crc kubenswrapper[4926]: I1125 18:48:44.938822 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-477qt\" (UniqueName: \"kubernetes.io/projected/270b1b63-7aa1-42d0-b14b-295d1e817768-kube-api-access-477qt\") on node \"crc\" DevicePath \"\"" Nov 25 18:48:45 crc kubenswrapper[4926]: I1125 18:48:45.736777 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zl65q" Nov 25 18:48:45 crc kubenswrapper[4926]: I1125 18:48:45.798020 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zl65q"] Nov 25 18:48:45 crc kubenswrapper[4926]: I1125 18:48:45.811894 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zl65q"] Nov 25 18:48:46 crc kubenswrapper[4926]: I1125 18:48:46.350614 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" path="/var/lib/kubelet/pods/270b1b63-7aa1-42d0-b14b-295d1e817768/volumes" Nov 25 18:49:03 crc kubenswrapper[4926]: I1125 18:49:03.541652 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:49:03 crc kubenswrapper[4926]: I1125 18:49:03.542412 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:49:29 crc kubenswrapper[4926]: I1125 18:49:29.255714 4926 generic.go:334] "Generic (PLEG): container finished" podID="c91c3c0b-35d3-402e-a758-672676d30d1d" containerID="197e7ae6418d2f6b6c604c228d9f839a0f2141d7ea256fe9dca65939dc83994f" exitCode=0 Nov 25 18:49:29 crc kubenswrapper[4926]: I1125 18:49:29.255785 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" event={"ID":"c91c3c0b-35d3-402e-a758-672676d30d1d","Type":"ContainerDied","Data":"197e7ae6418d2f6b6c604c228d9f839a0f2141d7ea256fe9dca65939dc83994f"} Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.780181 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.837339 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-inventory\") pod \"c91c3c0b-35d3-402e-a758-672676d30d1d\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.837495 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c91c3c0b-35d3-402e-a758-672676d30d1d-ovncontroller-config-0\") pod \"c91c3c0b-35d3-402e-a758-672676d30d1d\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.837598 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ssh-key\") pod \"c91c3c0b-35d3-402e-a758-672676d30d1d\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.837639 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ovn-combined-ca-bundle\") pod \"c91c3c0b-35d3-402e-a758-672676d30d1d\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.837757 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvh62\" (UniqueName: \"kubernetes.io/projected/c91c3c0b-35d3-402e-a758-672676d30d1d-kube-api-access-gvh62\") pod \"c91c3c0b-35d3-402e-a758-672676d30d1d\" (UID: \"c91c3c0b-35d3-402e-a758-672676d30d1d\") " Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.845403 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "c91c3c0b-35d3-402e-a758-672676d30d1d" (UID: "c91c3c0b-35d3-402e-a758-672676d30d1d"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.850651 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c91c3c0b-35d3-402e-a758-672676d30d1d-kube-api-access-gvh62" (OuterVolumeSpecName: "kube-api-access-gvh62") pod "c91c3c0b-35d3-402e-a758-672676d30d1d" (UID: "c91c3c0b-35d3-402e-a758-672676d30d1d"). InnerVolumeSpecName "kube-api-access-gvh62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.867683 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-inventory" (OuterVolumeSpecName: "inventory") pod "c91c3c0b-35d3-402e-a758-672676d30d1d" (UID: "c91c3c0b-35d3-402e-a758-672676d30d1d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.869792 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c91c3c0b-35d3-402e-a758-672676d30d1d" (UID: "c91c3c0b-35d3-402e-a758-672676d30d1d"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.875059 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c91c3c0b-35d3-402e-a758-672676d30d1d-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "c91c3c0b-35d3-402e-a758-672676d30d1d" (UID: "c91c3c0b-35d3-402e-a758-672676d30d1d"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.940688 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvh62\" (UniqueName: \"kubernetes.io/projected/c91c3c0b-35d3-402e-a758-672676d30d1d-kube-api-access-gvh62\") on node \"crc\" DevicePath \"\"" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.940719 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.940729 4926 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c91c3c0b-35d3-402e-a758-672676d30d1d-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.940738 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:49:30 crc kubenswrapper[4926]: I1125 18:49:30.940747 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91c3c0b-35d3-402e-a758-672676d30d1d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.280342 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" event={"ID":"c91c3c0b-35d3-402e-a758-672676d30d1d","Type":"ContainerDied","Data":"d32aa5c5f822a1c04acee50a32ba3eebb2365b8624544bf9c0c7759cbd7baf47"} Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.280421 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-mn7s6" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.280432 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d32aa5c5f822a1c04acee50a32ba3eebb2365b8624544bf9c0c7759cbd7baf47" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.405896 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h"] Nov 25 18:49:31 crc kubenswrapper[4926]: E1125 18:49:31.406408 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="extract-utilities" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.406428 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="extract-utilities" Nov 25 18:49:31 crc kubenswrapper[4926]: E1125 18:49:31.406460 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91c3c0b-35d3-402e-a758-672676d30d1d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.406471 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91c3c0b-35d3-402e-a758-672676d30d1d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 18:49:31 crc kubenswrapper[4926]: E1125 18:49:31.406492 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="extract-content" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.406500 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="extract-content" Nov 25 18:49:31 crc kubenswrapper[4926]: E1125 18:49:31.406514 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="registry-server" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.406524 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="registry-server" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.406760 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91c3c0b-35d3-402e-a758-672676d30d1d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.406782 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="270b1b63-7aa1-42d0-b14b-295d1e817768" containerName="registry-server" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.407653 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.411404 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.411450 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.411767 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.412687 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.413016 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.414387 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.424183 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h"] Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.551220 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.551418 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.551467 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.551596 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.551694 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.551901 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2jlr\" (UniqueName: \"kubernetes.io/projected/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-kube-api-access-j2jlr\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.654188 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.654295 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.654337 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.654503 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.654594 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.654698 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2jlr\" (UniqueName: \"kubernetes.io/projected/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-kube-api-access-j2jlr\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.660105 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.660743 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.660992 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.663813 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.664822 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.675794 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2jlr\" (UniqueName: \"kubernetes.io/projected/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-kube-api-access-j2jlr\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:31 crc kubenswrapper[4926]: I1125 18:49:31.737241 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:49:32 crc kubenswrapper[4926]: I1125 18:49:32.317080 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h"] Nov 25 18:49:33 crc kubenswrapper[4926]: I1125 18:49:33.303482 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" event={"ID":"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6","Type":"ContainerStarted","Data":"c2c460d5e6b0d394da2d9295f98e6a3b6cfde806938b3b52575f6b71e7a59491"} Nov 25 18:49:33 crc kubenswrapper[4926]: I1125 18:49:33.303840 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" event={"ID":"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6","Type":"ContainerStarted","Data":"c226a7330da23f6c16bbd6e394f4bc8a96dbfb243db5342b8304c0e60b8a0e92"} Nov 25 18:49:33 crc kubenswrapper[4926]: I1125 18:49:33.338986 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" podStartSLOduration=1.858711762 podStartE2EDuration="2.338964363s" podCreationTimestamp="2025-11-25 18:49:31 +0000 UTC" firstStartedPulling="2025-11-25 18:49:32.323898651 +0000 UTC m=+2202.709412256" lastFinishedPulling="2025-11-25 18:49:32.804151242 +0000 UTC m=+2203.189664857" observedRunningTime="2025-11-25 18:49:33.335635084 +0000 UTC m=+2203.721148739" watchObservedRunningTime="2025-11-25 18:49:33.338964363 +0000 UTC m=+2203.724477988" Nov 25 18:49:33 crc kubenswrapper[4926]: I1125 18:49:33.541241 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:49:33 crc kubenswrapper[4926]: I1125 18:49:33.541339 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:50:03 crc kubenswrapper[4926]: I1125 18:50:03.541783 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:50:03 crc kubenswrapper[4926]: I1125 18:50:03.542321 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:50:03 crc kubenswrapper[4926]: I1125 18:50:03.542363 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:50:03 crc kubenswrapper[4926]: I1125 18:50:03.543157 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:50:03 crc kubenswrapper[4926]: I1125 18:50:03.543216 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" gracePeriod=600 Nov 25 18:50:03 crc kubenswrapper[4926]: E1125 18:50:03.673565 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:50:04 crc kubenswrapper[4926]: I1125 18:50:04.664321 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" exitCode=0 Nov 25 18:50:04 crc kubenswrapper[4926]: I1125 18:50:04.664400 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee"} Nov 25 18:50:04 crc kubenswrapper[4926]: I1125 18:50:04.664706 4926 scope.go:117] "RemoveContainer" containerID="382b7da6d0bccad494a8a3403e357e319f9afc1dcce63b01d2f6de71ba19c951" Nov 25 18:50:04 crc kubenswrapper[4926]: I1125 18:50:04.666249 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:50:04 crc kubenswrapper[4926]: E1125 18:50:04.667530 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:50:16 crc kubenswrapper[4926]: I1125 18:50:16.330066 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:50:16 crc kubenswrapper[4926]: E1125 18:50:16.332214 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:50:28 crc kubenswrapper[4926]: I1125 18:50:28.330156 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:50:28 crc kubenswrapper[4926]: E1125 18:50:28.331290 4926 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:50:30 crc kubenswrapper[4926]: I1125 18:50:30.984894 4926 generic.go:334] "Generic (PLEG): container finished" podID="8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" containerID="c2c460d5e6b0d394da2d9295f98e6a3b6cfde806938b3b52575f6b71e7a59491" exitCode=0 Nov 25 18:50:30 crc kubenswrapper[4926]: I1125 18:50:30.984948 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" event={"ID":"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6","Type":"ContainerDied","Data":"c2c460d5e6b0d394da2d9295f98e6a3b6cfde806938b3b52575f6b71e7a59491"} Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.429582 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.586144 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-ssh-key\") pod \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.586212 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-inventory\") pod \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.586283 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.586393 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-nova-metadata-neutron-config-0\") pod \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.586472 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-metadata-combined-ca-bundle\") pod \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.586541 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2jlr\" (UniqueName: \"kubernetes.io/projected/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-kube-api-access-j2jlr\") pod \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\" (UID: \"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6\") " Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.592557 4926 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/projected/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-kube-api-access-j2jlr" (OuterVolumeSpecName: "kube-api-access-j2jlr") pod "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" (UID: "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6"). InnerVolumeSpecName "kube-api-access-j2jlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.594153 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" (UID: "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.618344 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-inventory" (OuterVolumeSpecName: "inventory") pod "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" (UID: "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.619576 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" (UID: "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.626415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" (UID: "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.659284 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" (UID: "8e6b918a-9ab8-44c6-9b56-48189c2cbaf6"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.689030 4926 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.689089 4926 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.689115 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2jlr\" (UniqueName: \"kubernetes.io/projected/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-kube-api-access-j2jlr\") on node \"crc\" DevicePath \"\"" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.689137 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.689155 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:50:32 crc kubenswrapper[4926]: I1125 18:50:32.689173 4926 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8e6b918a-9ab8-44c6-9b56-48189c2cbaf6-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.008517 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" event={"ID":"8e6b918a-9ab8-44c6-9b56-48189c2cbaf6","Type":"ContainerDied","Data":"c226a7330da23f6c16bbd6e394f4bc8a96dbfb243db5342b8304c0e60b8a0e92"} Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.008904 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c226a7330da23f6c16bbd6e394f4bc8a96dbfb243db5342b8304c0e60b8a0e92" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.008639 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.109983 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m"] Nov 25 18:50:33 crc kubenswrapper[4926]: E1125 18:50:33.110640 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.110674 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.111004 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e6b918a-9ab8-44c6-9b56-48189c2cbaf6" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.112045 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.114857 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.118096 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.118301 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.118499 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.118671 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.121812 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m"] Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.199299 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.199484 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p72mn\" (UniqueName: \"kubernetes.io/projected/3967079a-4360-41e8-85e9-97c74ace2a79-kube-api-access-p72mn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.199546 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.199632 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.199724 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.302168 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-p72mn\" (UniqueName: \"kubernetes.io/projected/3967079a-4360-41e8-85e9-97c74ace2a79-kube-api-access-p72mn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.302271 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.302410 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.302536 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.302608 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.321627 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.321758 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.321989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.322706 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.342468 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p72mn\" (UniqueName: \"kubernetes.io/projected/3967079a-4360-41e8-85e9-97c74ace2a79-kube-api-access-p72mn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-99d7m\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:33 crc kubenswrapper[4926]: I1125 18:50:33.437594 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:50:34 crc kubenswrapper[4926]: I1125 18:50:34.089354 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m"] Nov 25 18:50:35 crc kubenswrapper[4926]: I1125 18:50:35.035677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" event={"ID":"3967079a-4360-41e8-85e9-97c74ace2a79","Type":"ContainerStarted","Data":"332ff738da9e4e5e9bbc1883495ac82980ede1244f000399a72b08ce000750f5"} Nov 25 18:50:35 crc kubenswrapper[4926]: I1125 18:50:35.036244 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" event={"ID":"3967079a-4360-41e8-85e9-97c74ace2a79","Type":"ContainerStarted","Data":"29855cae95c7afbddb5960d4e5f3ee3bbecf75feabb0f30431f578b30b870d13"} Nov 25 18:50:35 crc kubenswrapper[4926]: I1125 18:50:35.065019 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" podStartSLOduration=1.599376232 podStartE2EDuration="2.064993279s" podCreationTimestamp="2025-11-25 18:50:33 +0000 UTC" firstStartedPulling="2025-11-25 18:50:34.088088575 +0000 UTC m=+2264.473602200" lastFinishedPulling="2025-11-25 18:50:34.553705652 +0000 UTC m=+2264.939219247" observedRunningTime="2025-11-25 18:50:35.058619752 +0000 UTC m=+2265.444133367" watchObservedRunningTime="2025-11-25 18:50:35.064993279 +0000 UTC m=+2265.450506894" Nov 25 18:50:42 crc kubenswrapper[4926]: I1125 18:50:42.330286 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:50:42 crc kubenswrapper[4926]: E1125 18:50:42.331646 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:50:53 crc kubenswrapper[4926]: I1125 18:50:53.329703 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:50:53 crc kubenswrapper[4926]: E1125 18:50:53.330907 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:51:04 crc kubenswrapper[4926]: I1125 18:51:04.330646 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:51:04 crc kubenswrapper[4926]: E1125 18:51:04.332643 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.069546 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.073586 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.095682 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.181934 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-catalog-content\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.182090 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-utilities\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.182362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wbvt\" (UniqueName: \"kubernetes.io/projected/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-kube-api-access-5wbvt\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.284151 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-utilities\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.284450 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wbvt\" (UniqueName: \"kubernetes.io/projected/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-kube-api-access-5wbvt\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.284515 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-catalog-content\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.284938 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-utilities\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.285057 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-catalog-content\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.310766 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wbvt\" (UniqueName: \"kubernetes.io/projected/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-kube-api-access-5wbvt\") pod \"redhat-operators-kkvkh\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.418111 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:07 crc kubenswrapper[4926]: I1125 18:51:07.926964 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 18:51:08 crc kubenswrapper[4926]: I1125 18:51:08.419041 4926 generic.go:334] "Generic (PLEG): container finished" podID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerID="51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6" exitCode=0 Nov 25 18:51:08 crc kubenswrapper[4926]: I1125 18:51:08.419134 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerDied","Data":"51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6"} Nov 25 18:51:08 crc kubenswrapper[4926]: I1125 18:51:08.419494 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerStarted","Data":"56b654b543ecdf4815f79b89d60ef55e7dd43a936e57992e7862704c7d264043"} Nov 25 18:51:08 crc kubenswrapper[4926]: I1125 18:51:08.863900 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j2s28"] Nov 25 18:51:08 crc kubenswrapper[4926]: I1125 18:51:08.866456 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:08 crc kubenswrapper[4926]: I1125 18:51:08.894205 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j2s28"] Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.036691 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-utilities\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.037198 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccq28\" (UniqueName: \"kubernetes.io/projected/80f16f49-2223-487d-9959-e369f60acbbb-kube-api-access-ccq28\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.037236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-catalog-content\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.139003 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccq28\" (UniqueName: \"kubernetes.io/projected/80f16f49-2223-487d-9959-e369f60acbbb-kube-api-access-ccq28\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.139066 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-catalog-content\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.139119 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-utilities\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.139635 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-catalog-content\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.139672 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-utilities\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.175238 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ccq28\" (UniqueName: \"kubernetes.io/projected/80f16f49-2223-487d-9959-e369f60acbbb-kube-api-access-ccq28\") pod \"certified-operators-j2s28\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.191222 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:09 crc kubenswrapper[4926]: I1125 18:51:09.655582 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j2s28"] Nov 25 18:51:09 crc kubenswrapper[4926]: W1125 18:51:09.664956 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80f16f49_2223_487d_9959_e369f60acbbb.slice/crio-da37a223c97aa98bffffac12810b5a88593b886f235a39ece254d9785aef1484 WatchSource:0}: Error finding container da37a223c97aa98bffffac12810b5a88593b886f235a39ece254d9785aef1484: Status 404 returned error can't find the container with id da37a223c97aa98bffffac12810b5a88593b886f235a39ece254d9785aef1484 Nov 25 18:51:10 crc kubenswrapper[4926]: I1125 18:51:10.448174 4926 generic.go:334] "Generic (PLEG): container finished" podID="80f16f49-2223-487d-9959-e369f60acbbb" containerID="ac7d8b70e3229c2524b701542eb1415e65fa4ff16533b6d73138380f8c31da15" exitCode=0 Nov 25 18:51:10 crc kubenswrapper[4926]: I1125 18:51:10.448238 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerDied","Data":"ac7d8b70e3229c2524b701542eb1415e65fa4ff16533b6d73138380f8c31da15"} Nov 25 18:51:10 crc kubenswrapper[4926]: I1125 18:51:10.448571 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerStarted","Data":"da37a223c97aa98bffffac12810b5a88593b886f235a39ece254d9785aef1484"} Nov 25 18:51:11 crc kubenswrapper[4926]: I1125 18:51:11.464172 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerStarted","Data":"acb48a9b009acfd2c1156b244487c2ff0b8907db82924558008f245055ed75f8"} Nov 25 18:51:15 crc kubenswrapper[4926]: I1125 18:51:15.511262 4926 generic.go:334] "Generic (PLEG): container finished" podID="80f16f49-2223-487d-9959-e369f60acbbb" containerID="acb48a9b009acfd2c1156b244487c2ff0b8907db82924558008f245055ed75f8" exitCode=0 Nov 25 18:51:15 crc kubenswrapper[4926]: I1125 18:51:15.511591 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerDied","Data":"acb48a9b009acfd2c1156b244487c2ff0b8907db82924558008f245055ed75f8"} Nov 25 18:51:16 crc kubenswrapper[4926]: I1125 18:51:16.329936 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:51:16 crc kubenswrapper[4926]: E1125 18:51:16.330347 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:51:19 crc kubenswrapper[4926]: I1125 18:51:19.553448 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerStarted","Data":"2314a10745c2d108fb5ef5f3367009cccb6988664aaba5e052e1ad42822343d8"} Nov 25 18:51:19 crc kubenswrapper[4926]: I1125 18:51:19.555595 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerStarted","Data":"f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad"} Nov 25 18:51:19 crc kubenswrapper[4926]: I1125 18:51:19.574553 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j2s28" podStartSLOduration=3.385240599 podStartE2EDuration="11.574536921s" podCreationTimestamp="2025-11-25 18:51:08 +0000 UTC" firstStartedPulling="2025-11-25 18:51:10.449752437 +0000 UTC m=+2300.835266052" lastFinishedPulling="2025-11-25 18:51:18.639048759 +0000 UTC m=+2309.024562374" observedRunningTime="2025-11-25 18:51:19.573018538 +0000 UTC m=+2309.958532153" watchObservedRunningTime="2025-11-25 18:51:19.574536921 +0000 UTC m=+2309.960050526" Nov 25 18:51:20 crc kubenswrapper[4926]: I1125 18:51:20.568799 4926 generic.go:334] "Generic (PLEG): container finished" podID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerID="f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad" exitCode=0 Nov 25 18:51:20 crc kubenswrapper[4926]: I1125 18:51:20.568852 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerDied","Data":"f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad"} Nov 25 18:51:25 crc kubenswrapper[4926]: I1125 18:51:25.645100 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerStarted","Data":"cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d"} Nov 25 18:51:25 crc kubenswrapper[4926]: I1125 18:51:25.670422 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kkvkh" podStartSLOduration=2.864802551 podStartE2EDuration="18.670353183s" podCreationTimestamp="2025-11-25 18:51:07 +0000 UTC" firstStartedPulling="2025-11-25 18:51:08.420383913 +0000 UTC m=+2298.805897508" lastFinishedPulling="2025-11-25 18:51:24.225934535 +0000 UTC m=+2314.611448140" observedRunningTime="2025-11-25 18:51:25.663633908 +0000 UTC m=+2316.049147523" watchObservedRunningTime="2025-11-25 18:51:25.670353183 +0000 UTC m=+2316.055866788" Nov 25 18:51:27 crc kubenswrapper[4926]: I1125 18:51:27.419150 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:27 crc kubenswrapper[4926]: I1125 18:51:27.419740 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:51:28 crc kubenswrapper[4926]: I1125 18:51:28.329990 4926 scope.go:117] "RemoveContainer" 
containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:51:28 crc kubenswrapper[4926]: E1125 18:51:28.330733 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:51:28 crc kubenswrapper[4926]: I1125 18:51:28.504017 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kkvkh" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" probeResult="failure" output=< Nov 25 18:51:28 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:51:28 crc kubenswrapper[4926]: > Nov 25 18:51:29 crc kubenswrapper[4926]: I1125 18:51:29.191488 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:29 crc kubenswrapper[4926]: I1125 18:51:29.191580 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:30 crc kubenswrapper[4926]: I1125 18:51:30.347814 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-j2s28" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="registry-server" probeResult="failure" output=< Nov 25 18:51:30 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:51:30 crc kubenswrapper[4926]: > Nov 25 18:51:38 crc kubenswrapper[4926]: I1125 18:51:38.506345 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kkvkh" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" probeResult="failure" output=< Nov 25 18:51:38 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:51:38 crc kubenswrapper[4926]: > Nov 25 18:51:39 crc kubenswrapper[4926]: I1125 18:51:39.331845 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:51:39 crc kubenswrapper[4926]: E1125 18:51:39.332120 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:51:40 crc kubenswrapper[4926]: I1125 18:51:40.246943 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-j2s28" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="registry-server" probeResult="failure" output=< Nov 25 18:51:40 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:51:40 crc kubenswrapper[4926]: > Nov 25 18:51:48 crc kubenswrapper[4926]: I1125 18:51:48.508261 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kkvkh" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" 
probeResult="failure" output=< Nov 25 18:51:48 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:51:48 crc kubenswrapper[4926]: > Nov 25 18:51:49 crc kubenswrapper[4926]: I1125 18:51:49.253324 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:49 crc kubenswrapper[4926]: I1125 18:51:49.316964 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:49 crc kubenswrapper[4926]: I1125 18:51:49.489904 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j2s28"] Nov 25 18:51:50 crc kubenswrapper[4926]: I1125 18:51:50.340033 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:51:50 crc kubenswrapper[4926]: E1125 18:51:50.340698 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:51:50 crc kubenswrapper[4926]: I1125 18:51:50.954491 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j2s28" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="registry-server" containerID="cri-o://2314a10745c2d108fb5ef5f3367009cccb6988664aaba5e052e1ad42822343d8" gracePeriod=2 Nov 25 18:51:51 crc kubenswrapper[4926]: I1125 18:51:51.965099 4926 generic.go:334] "Generic (PLEG): container finished" podID="80f16f49-2223-487d-9959-e369f60acbbb" containerID="2314a10745c2d108fb5ef5f3367009cccb6988664aaba5e052e1ad42822343d8" exitCode=0 Nov 25 18:51:51 crc kubenswrapper[4926]: I1125 18:51:51.965186 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerDied","Data":"2314a10745c2d108fb5ef5f3367009cccb6988664aaba5e052e1ad42822343d8"} Nov 25 18:51:51 crc kubenswrapper[4926]: I1125 18:51:51.965704 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j2s28" event={"ID":"80f16f49-2223-487d-9959-e369f60acbbb","Type":"ContainerDied","Data":"da37a223c97aa98bffffac12810b5a88593b886f235a39ece254d9785aef1484"} Nov 25 18:51:51 crc kubenswrapper[4926]: I1125 18:51:51.965723 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da37a223c97aa98bffffac12810b5a88593b886f235a39ece254d9785aef1484" Nov 25 18:51:51 crc kubenswrapper[4926]: I1125 18:51:51.988651 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.038662 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccq28\" (UniqueName: \"kubernetes.io/projected/80f16f49-2223-487d-9959-e369f60acbbb-kube-api-access-ccq28\") pod \"80f16f49-2223-487d-9959-e369f60acbbb\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.038926 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-utilities\") pod \"80f16f49-2223-487d-9959-e369f60acbbb\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.039027 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-catalog-content\") pod \"80f16f49-2223-487d-9959-e369f60acbbb\" (UID: \"80f16f49-2223-487d-9959-e369f60acbbb\") " Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.040021 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-utilities" (OuterVolumeSpecName: "utilities") pod "80f16f49-2223-487d-9959-e369f60acbbb" (UID: "80f16f49-2223-487d-9959-e369f60acbbb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.044461 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80f16f49-2223-487d-9959-e369f60acbbb-kube-api-access-ccq28" (OuterVolumeSpecName: "kube-api-access-ccq28") pod "80f16f49-2223-487d-9959-e369f60acbbb" (UID: "80f16f49-2223-487d-9959-e369f60acbbb"). InnerVolumeSpecName "kube-api-access-ccq28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.091421 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80f16f49-2223-487d-9959-e369f60acbbb" (UID: "80f16f49-2223-487d-9959-e369f60acbbb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.140790 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.140818 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccq28\" (UniqueName: \"kubernetes.io/projected/80f16f49-2223-487d-9959-e369f60acbbb-kube-api-access-ccq28\") on node \"crc\" DevicePath \"\"" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.140829 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80f16f49-2223-487d-9959-e369f60acbbb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:51:52 crc kubenswrapper[4926]: I1125 18:51:52.974629 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j2s28" Nov 25 18:51:53 crc kubenswrapper[4926]: I1125 18:51:53.010569 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j2s28"] Nov 25 18:51:53 crc kubenswrapper[4926]: I1125 18:51:53.027230 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j2s28"] Nov 25 18:51:54 crc kubenswrapper[4926]: I1125 18:51:54.343326 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80f16f49-2223-487d-9959-e369f60acbbb" path="/var/lib/kubelet/pods/80f16f49-2223-487d-9959-e369f60acbbb/volumes" Nov 25 18:51:58 crc kubenswrapper[4926]: I1125 18:51:58.469879 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kkvkh" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" probeResult="failure" output=< Nov 25 18:51:58 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 18:51:58 crc kubenswrapper[4926]: > Nov 25 18:52:04 crc kubenswrapper[4926]: I1125 18:52:04.329265 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:52:04 crc kubenswrapper[4926]: E1125 18:52:04.330059 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:52:07 crc kubenswrapper[4926]: I1125 18:52:07.478896 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:52:07 crc kubenswrapper[4926]: I1125 18:52:07.534844 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.133782 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.293903 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2xrjj"] Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.295794 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2xrjj" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="registry-server" containerID="cri-o://94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5" gracePeriod=2 Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.818597 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2xrjj" Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.972333 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-catalog-content\") pod \"3db52cdf-6c82-47a4-abf5-120741331680\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.973468 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbzxh\" (UniqueName: \"kubernetes.io/projected/3db52cdf-6c82-47a4-abf5-120741331680-kube-api-access-rbzxh\") pod \"3db52cdf-6c82-47a4-abf5-120741331680\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.973728 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-utilities\") pod \"3db52cdf-6c82-47a4-abf5-120741331680\" (UID: \"3db52cdf-6c82-47a4-abf5-120741331680\") " Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.974360 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-utilities" (OuterVolumeSpecName: "utilities") pod "3db52cdf-6c82-47a4-abf5-120741331680" (UID: "3db52cdf-6c82-47a4-abf5-120741331680"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.974920 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:52:08 crc kubenswrapper[4926]: I1125 18:52:08.980655 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3db52cdf-6c82-47a4-abf5-120741331680-kube-api-access-rbzxh" (OuterVolumeSpecName: "kube-api-access-rbzxh") pod "3db52cdf-6c82-47a4-abf5-120741331680" (UID: "3db52cdf-6c82-47a4-abf5-120741331680"). InnerVolumeSpecName "kube-api-access-rbzxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.076751 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbzxh\" (UniqueName: \"kubernetes.io/projected/3db52cdf-6c82-47a4-abf5-120741331680-kube-api-access-rbzxh\") on node \"crc\" DevicePath \"\"" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.095806 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3db52cdf-6c82-47a4-abf5-120741331680" (UID: "3db52cdf-6c82-47a4-abf5-120741331680"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.136820 4926 generic.go:334] "Generic (PLEG): container finished" podID="3db52cdf-6c82-47a4-abf5-120741331680" containerID="94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5" exitCode=0 Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.136997 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerDied","Data":"94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5"} Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.137027 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xrjj" event={"ID":"3db52cdf-6c82-47a4-abf5-120741331680","Type":"ContainerDied","Data":"8331301e92799b977a34075833c5c70d25fe21ccc4ed82aa8d47dad9ec6a5ae6"} Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.137043 4926 scope.go:117] "RemoveContainer" containerID="94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.137206 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xrjj" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.174843 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2xrjj"] Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.178006 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db52cdf-6c82-47a4-abf5-120741331680-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.182056 4926 scope.go:117] "RemoveContainer" containerID="87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.184228 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2xrjj"] Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.205564 4926 scope.go:117] "RemoveContainer" containerID="6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.254971 4926 scope.go:117] "RemoveContainer" containerID="94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5" Nov 25 18:52:09 crc kubenswrapper[4926]: E1125 18:52:09.255359 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5\": container with ID starting with 94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5 not found: ID does not exist" containerID="94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.255429 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5"} err="failed to get container status \"94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5\": rpc error: code = NotFound desc = could not find container \"94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5\": container with ID starting with 94997a32a5212339f9dfbbba84906c1e20dec5e60fa96a2b42864df75c09b5c5 not found: ID does not exist" Nov 25 18:52:09 crc 
kubenswrapper[4926]: I1125 18:52:09.255461 4926 scope.go:117] "RemoveContainer" containerID="87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63" Nov 25 18:52:09 crc kubenswrapper[4926]: E1125 18:52:09.255903 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63\": container with ID starting with 87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63 not found: ID does not exist" containerID="87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.255939 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63"} err="failed to get container status \"87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63\": rpc error: code = NotFound desc = could not find container \"87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63\": container with ID starting with 87c9c1203e1c0ffd907fc4bcca68b8d75e36c93aeca0d558313b8f241ea07a63 not found: ID does not exist" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.255959 4926 scope.go:117] "RemoveContainer" containerID="6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6" Nov 25 18:52:09 crc kubenswrapper[4926]: E1125 18:52:09.256210 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6\": container with ID starting with 6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6 not found: ID does not exist" containerID="6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6" Nov 25 18:52:09 crc kubenswrapper[4926]: I1125 18:52:09.256245 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6"} err="failed to get container status \"6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6\": rpc error: code = NotFound desc = could not find container \"6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6\": container with ID starting with 6541c90329af35902a5ef106d938b36663848add6083b3d7e36d62a1ff1208e6 not found: ID does not exist" Nov 25 18:52:10 crc kubenswrapper[4926]: I1125 18:52:10.355923 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3db52cdf-6c82-47a4-abf5-120741331680" path="/var/lib/kubelet/pods/3db52cdf-6c82-47a4-abf5-120741331680/volumes" Nov 25 18:52:16 crc kubenswrapper[4926]: I1125 18:52:16.329879 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:52:16 crc kubenswrapper[4926]: E1125 18:52:16.330850 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:52:28 crc kubenswrapper[4926]: I1125 18:52:28.329778 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" 
Nov 25 18:52:28 crc kubenswrapper[4926]: E1125 18:52:28.330714 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:52:43 crc kubenswrapper[4926]: I1125 18:52:43.329888 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:52:43 crc kubenswrapper[4926]: E1125 18:52:43.331552 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:52:55 crc kubenswrapper[4926]: I1125 18:52:55.331711 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:52:55 crc kubenswrapper[4926]: E1125 18:52:55.332620 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:53:09 crc kubenswrapper[4926]: I1125 18:53:09.329866 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:53:09 crc kubenswrapper[4926]: E1125 18:53:09.330737 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:53:22 crc kubenswrapper[4926]: I1125 18:53:22.330433 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:53:22 crc kubenswrapper[4926]: E1125 18:53:22.331658 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:53:37 crc kubenswrapper[4926]: I1125 18:53:37.329720 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:53:37 crc kubenswrapper[4926]: E1125 18:53:37.331209 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:53:48 crc kubenswrapper[4926]: I1125 18:53:48.330059 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:53:48 crc kubenswrapper[4926]: E1125 18:53:48.331170 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:54:00 crc kubenswrapper[4926]: I1125 18:54:00.336601 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:54:00 crc kubenswrapper[4926]: E1125 18:54:00.363577 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:54:11 crc kubenswrapper[4926]: I1125 18:54:11.329906 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:54:11 crc kubenswrapper[4926]: E1125 18:54:11.330781 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:54:24 crc kubenswrapper[4926]: I1125 18:54:24.329520 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:54:24 crc kubenswrapper[4926]: E1125 18:54:24.330612 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:54:39 crc kubenswrapper[4926]: I1125 18:54:39.329773 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:54:39 crc kubenswrapper[4926]: E1125 18:54:39.330610 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:54:54 crc kubenswrapper[4926]: I1125 18:54:54.330835 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:54:54 crc kubenswrapper[4926]: E1125 18:54:54.331907 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 18:55:06 crc kubenswrapper[4926]: I1125 18:55:06.329861 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:55:07 crc kubenswrapper[4926]: I1125 18:55:07.584658 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"0a9fba914534656894c1354afeb1abc1746a5853600a27b6b9c61bb4920391f4"} Nov 25 18:55:22 crc kubenswrapper[4926]: I1125 18:55:22.196778 4926 scope.go:117] "RemoveContainer" containerID="11b5eeace705850ebff134dc36dfd7d69a55f13afbf21e9f8d519d596dec4f11" Nov 25 18:55:22 crc kubenswrapper[4926]: I1125 18:55:22.245267 4926 scope.go:117] "RemoveContainer" containerID="788ac9935c2eae0a479eea3550d0db2d03f8757bce2125d97b503968845796fc" Nov 25 18:55:22 crc kubenswrapper[4926]: I1125 18:55:22.279287 4926 scope.go:117] "RemoveContainer" containerID="24f20ec70d43c301c4c05c0bb2ac74bf52247e40396d26b549521af6a194b2fc" Nov 25 18:55:39 crc kubenswrapper[4926]: I1125 18:55:39.935805 4926 generic.go:334] "Generic (PLEG): container finished" podID="3967079a-4360-41e8-85e9-97c74ace2a79" containerID="332ff738da9e4e5e9bbc1883495ac82980ede1244f000399a72b08ce000750f5" exitCode=0 Nov 25 18:55:39 crc kubenswrapper[4926]: I1125 18:55:39.935904 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" event={"ID":"3967079a-4360-41e8-85e9-97c74ace2a79","Type":"ContainerDied","Data":"332ff738da9e4e5e9bbc1883495ac82980ede1244f000399a72b08ce000750f5"} Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.536694 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.650626 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-secret-0\") pod \"3967079a-4360-41e8-85e9-97c74ace2a79\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.650682 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p72mn\" (UniqueName: \"kubernetes.io/projected/3967079a-4360-41e8-85e9-97c74ace2a79-kube-api-access-p72mn\") pod \"3967079a-4360-41e8-85e9-97c74ace2a79\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.650774 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-combined-ca-bundle\") pod \"3967079a-4360-41e8-85e9-97c74ace2a79\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.650832 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-ssh-key\") pod \"3967079a-4360-41e8-85e9-97c74ace2a79\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.650864 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-inventory\") pod \"3967079a-4360-41e8-85e9-97c74ace2a79\" (UID: \"3967079a-4360-41e8-85e9-97c74ace2a79\") " Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.664472 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "3967079a-4360-41e8-85e9-97c74ace2a79" (UID: "3967079a-4360-41e8-85e9-97c74ace2a79"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.666686 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3967079a-4360-41e8-85e9-97c74ace2a79-kube-api-access-p72mn" (OuterVolumeSpecName: "kube-api-access-p72mn") pod "3967079a-4360-41e8-85e9-97c74ace2a79" (UID: "3967079a-4360-41e8-85e9-97c74ace2a79"). InnerVolumeSpecName "kube-api-access-p72mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.679747 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-inventory" (OuterVolumeSpecName: "inventory") pod "3967079a-4360-41e8-85e9-97c74ace2a79" (UID: "3967079a-4360-41e8-85e9-97c74ace2a79"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.694546 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3967079a-4360-41e8-85e9-97c74ace2a79" (UID: "3967079a-4360-41e8-85e9-97c74ace2a79"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.696105 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "3967079a-4360-41e8-85e9-97c74ace2a79" (UID: "3967079a-4360-41e8-85e9-97c74ace2a79"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.753924 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p72mn\" (UniqueName: \"kubernetes.io/projected/3967079a-4360-41e8-85e9-97c74ace2a79-kube-api-access-p72mn\") on node \"crc\" DevicePath \"\"" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.754494 4926 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.754575 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.754589 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.754599 4926 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3967079a-4360-41e8-85e9-97c74ace2a79-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.967888 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" event={"ID":"3967079a-4360-41e8-85e9-97c74ace2a79","Type":"ContainerDied","Data":"29855cae95c7afbddb5960d4e5f3ee3bbecf75feabb0f30431f578b30b870d13"} Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.968289 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29855cae95c7afbddb5960d4e5f3ee3bbecf75feabb0f30431f578b30b870d13" Nov 25 18:55:41 crc kubenswrapper[4926]: I1125 18:55:41.967973 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-99d7m" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.149522 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg"] Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150116 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="registry-server" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150147 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="registry-server" Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150179 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="extract-utilities" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150190 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="extract-utilities" Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150211 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="extract-utilities" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150223 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="extract-utilities" Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150249 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3967079a-4360-41e8-85e9-97c74ace2a79" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150261 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3967079a-4360-41e8-85e9-97c74ace2a79" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150281 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="registry-server" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150293 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="registry-server" Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150330 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="extract-content" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150340 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="extract-content" Nov 25 18:55:42 crc kubenswrapper[4926]: E1125 18:55:42.150353 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="extract-content" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150363 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="extract-content" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150643 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="80f16f49-2223-487d-9959-e369f60acbbb" containerName="registry-server" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.150666 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3db52cdf-6c82-47a4-abf5-120741331680" containerName="registry-server" Nov 25 18:55:42 crc 
kubenswrapper[4926]: I1125 18:55:42.150689 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3967079a-4360-41e8-85e9-97c74ace2a79" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.151620 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.154762 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.155899 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.156048 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.157953 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.158022 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.158090 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.158104 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.199979 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg"] Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.296805 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.296895 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k5bt\" (UniqueName: \"kubernetes.io/projected/983d045a-121a-40e1-9948-ecd9a1569e26-kube-api-access-8k5bt\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.296944 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.296969 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: 
\"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.297063 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.297116 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.297137 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/983d045a-121a-40e1-9948-ecd9a1569e26-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.297233 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.297424 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.398903 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399038 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399104 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k5bt\" (UniqueName: \"kubernetes.io/projected/983d045a-121a-40e1-9948-ecd9a1569e26-kube-api-access-8k5bt\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399149 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399229 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399310 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399566 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/983d045a-121a-40e1-9948-ecd9a1569e26-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.399672 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.400284 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/983d045a-121a-40e1-9948-ecd9a1569e26-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.404266 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.404654 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.404868 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.404950 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.406949 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.407149 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.408141 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.420508 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k5bt\" (UniqueName: \"kubernetes.io/projected/983d045a-121a-40e1-9948-ecd9a1569e26-kube-api-access-8k5bt\") pod \"nova-edpm-deployment-openstack-edpm-ipam-w6frg\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:42 crc kubenswrapper[4926]: I1125 18:55:42.489793 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:55:43 crc kubenswrapper[4926]: I1125 18:55:43.012779 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg"] Nov 25 18:55:43 crc kubenswrapper[4926]: I1125 18:55:43.025266 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:55:43 crc kubenswrapper[4926]: I1125 18:55:43.991129 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" event={"ID":"983d045a-121a-40e1-9948-ecd9a1569e26","Type":"ContainerStarted","Data":"804aabe90b6a6430e22530c461069d57b1e58c7d57173e562011de23b10c25ba"} Nov 25 18:55:43 crc kubenswrapper[4926]: I1125 18:55:43.991662 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" event={"ID":"983d045a-121a-40e1-9948-ecd9a1569e26","Type":"ContainerStarted","Data":"b57cde6e2de3c7dcd9327d4443dd78524f16339fcfb6edaf98fbdfd76843713f"} Nov 25 18:55:44 crc kubenswrapper[4926]: I1125 18:55:44.015746 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" podStartSLOduration=1.476353264 podStartE2EDuration="2.01572369s" podCreationTimestamp="2025-11-25 18:55:42 +0000 UTC" firstStartedPulling="2025-11-25 18:55:43.025082731 +0000 UTC m=+2573.410596336" lastFinishedPulling="2025-11-25 18:55:43.564453127 +0000 UTC m=+2573.949966762" observedRunningTime="2025-11-25 18:55:44.010756073 +0000 UTC m=+2574.396269688" watchObservedRunningTime="2025-11-25 18:55:44.01572369 +0000 UTC m=+2574.401237295" Nov 25 18:57:22 crc kubenswrapper[4926]: I1125 18:57:22.407584 4926 scope.go:117] "RemoveContainer" containerID="ac7d8b70e3229c2524b701542eb1415e65fa4ff16533b6d73138380f8c31da15" Nov 25 18:57:22 crc kubenswrapper[4926]: I1125 18:57:22.437362 4926 scope.go:117] "RemoveContainer" containerID="2314a10745c2d108fb5ef5f3367009cccb6988664aaba5e052e1ad42822343d8" Nov 25 18:57:22 crc kubenswrapper[4926]: I1125 18:57:22.504203 4926 scope.go:117] "RemoveContainer" containerID="acb48a9b009acfd2c1156b244487c2ff0b8907db82924558008f245055ed75f8" Nov 25 18:57:33 crc kubenswrapper[4926]: I1125 18:57:33.541261 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:57:33 crc kubenswrapper[4926]: I1125 18:57:33.542068 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:58:03 crc kubenswrapper[4926]: I1125 18:58:03.541993 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:58:03 crc kubenswrapper[4926]: I1125 18:58:03.542808 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.541007 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.541648 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.541710 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.542751 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0a9fba914534656894c1354afeb1abc1746a5853600a27b6b9c61bb4920391f4"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.542840 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://0a9fba914534656894c1354afeb1abc1746a5853600a27b6b9c61bb4920391f4" gracePeriod=600 Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.997970 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="0a9fba914534656894c1354afeb1abc1746a5853600a27b6b9c61bb4920391f4" exitCode=0 Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.998046 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"0a9fba914534656894c1354afeb1abc1746a5853600a27b6b9c61bb4920391f4"} Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.998451 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"} Nov 25 18:58:33 crc kubenswrapper[4926]: I1125 18:58:33.998470 4926 scope.go:117] "RemoveContainer" containerID="62b987cb5958921274a0b08b27c881f7075d6d6e987f9758f810f195fd87a0ee" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.479732 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-knw6v"] Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.482175 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.496004 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knw6v"] Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.611891 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-942vf\" (UniqueName: \"kubernetes.io/projected/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-kube-api-access-942vf\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.612315 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-catalog-content\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.612446 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-utilities\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.714663 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-utilities\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.714959 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-942vf\" (UniqueName: \"kubernetes.io/projected/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-kube-api-access-942vf\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.715088 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-catalog-content\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.715275 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-utilities\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.715818 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-catalog-content\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.739500 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-942vf\" (UniqueName: \"kubernetes.io/projected/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-kube-api-access-942vf\") pod \"redhat-marketplace-knw6v\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:56 crc kubenswrapper[4926]: I1125 18:58:56.811704 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:58:57 crc kubenswrapper[4926]: I1125 18:58:57.297345 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knw6v"] Nov 25 18:58:57 crc kubenswrapper[4926]: W1125 18:58:57.303184 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2a8e5bc_9b7d_4556_87b2_e83602a39cd8.slice/crio-bbec7551ed0bb8a6df38df3dcdcc746b0e4fa86044663f7daf53df9ad6a275b3 WatchSource:0}: Error finding container bbec7551ed0bb8a6df38df3dcdcc746b0e4fa86044663f7daf53df9ad6a275b3: Status 404 returned error can't find the container with id bbec7551ed0bb8a6df38df3dcdcc746b0e4fa86044663f7daf53df9ad6a275b3 Nov 25 18:58:58 crc kubenswrapper[4926]: I1125 18:58:58.307225 4926 generic.go:334] "Generic (PLEG): container finished" podID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerID="053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9" exitCode=0 Nov 25 18:58:58 crc kubenswrapper[4926]: I1125 18:58:58.307878 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerDied","Data":"053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9"} Nov 25 18:58:58 crc kubenswrapper[4926]: I1125 18:58:58.308536 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerStarted","Data":"bbec7551ed0bb8a6df38df3dcdcc746b0e4fa86044663f7daf53df9ad6a275b3"} Nov 25 18:58:59 crc kubenswrapper[4926]: I1125 18:58:59.320549 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerStarted","Data":"99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169"} Nov 25 18:59:00 crc kubenswrapper[4926]: I1125 18:59:00.339037 4926 generic.go:334] "Generic (PLEG): container finished" podID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerID="99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169" exitCode=0 Nov 25 18:59:00 crc kubenswrapper[4926]: I1125 18:59:00.353619 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerDied","Data":"99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169"} Nov 25 18:59:01 crc kubenswrapper[4926]: I1125 18:59:01.353715 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerStarted","Data":"84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488"} Nov 25 18:59:01 crc kubenswrapper[4926]: I1125 18:59:01.381300 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-knw6v" podStartSLOduration=2.950084736 
podStartE2EDuration="5.381274058s" podCreationTimestamp="2025-11-25 18:58:56 +0000 UTC" firstStartedPulling="2025-11-25 18:58:58.310223693 +0000 UTC m=+2768.695737308" lastFinishedPulling="2025-11-25 18:59:00.741413025 +0000 UTC m=+2771.126926630" observedRunningTime="2025-11-25 18:59:01.378790069 +0000 UTC m=+2771.764303674" watchObservedRunningTime="2025-11-25 18:59:01.381274058 +0000 UTC m=+2771.766787693" Nov 25 18:59:06 crc kubenswrapper[4926]: I1125 18:59:06.812496 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:59:06 crc kubenswrapper[4926]: I1125 18:59:06.813312 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:59:06 crc kubenswrapper[4926]: I1125 18:59:06.894069 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:59:07 crc kubenswrapper[4926]: I1125 18:59:07.492356 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:59:07 crc kubenswrapper[4926]: I1125 18:59:07.540731 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knw6v"] Nov 25 18:59:08 crc kubenswrapper[4926]: I1125 18:59:08.446431 4926 generic.go:334] "Generic (PLEG): container finished" podID="983d045a-121a-40e1-9948-ecd9a1569e26" containerID="804aabe90b6a6430e22530c461069d57b1e58c7d57173e562011de23b10c25ba" exitCode=0 Nov 25 18:59:08 crc kubenswrapper[4926]: I1125 18:59:08.446524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" event={"ID":"983d045a-121a-40e1-9948-ecd9a1569e26","Type":"ContainerDied","Data":"804aabe90b6a6430e22530c461069d57b1e58c7d57173e562011de23b10c25ba"} Nov 25 18:59:09 crc kubenswrapper[4926]: I1125 18:59:09.458843 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-knw6v" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="registry-server" containerID="cri-o://84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488" gracePeriod=2 Nov 25 18:59:09 crc kubenswrapper[4926]: I1125 18:59:09.987411 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.000646 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.153657 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-0\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.153731 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-utilities\") pod \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.153782 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-ssh-key\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.153815 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/983d045a-121a-40e1-9948-ecd9a1569e26-nova-extra-config-0\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.153844 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k5bt\" (UniqueName: \"kubernetes.io/projected/983d045a-121a-40e1-9948-ecd9a1569e26-kube-api-access-8k5bt\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154088 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-inventory\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154112 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-942vf\" (UniqueName: \"kubernetes.io/projected/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-kube-api-access-942vf\") pod \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154164 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-combined-ca-bundle\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154201 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-1\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154270 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-catalog-content\") pod \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\" (UID: \"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154348 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-1\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.154391 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-0\") pod \"983d045a-121a-40e1-9948-ecd9a1569e26\" (UID: \"983d045a-121a-40e1-9948-ecd9a1569e26\") " Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.155010 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-utilities" (OuterVolumeSpecName: "utilities") pod "d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" (UID: "d2a8e5bc-9b7d-4556-87b2-e83602a39cd8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.160989 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.161546 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/983d045a-121a-40e1-9948-ecd9a1569e26-kube-api-access-8k5bt" (OuterVolumeSpecName: "kube-api-access-8k5bt") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "kube-api-access-8k5bt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.165715 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-kube-api-access-942vf" (OuterVolumeSpecName: "kube-api-access-942vf") pod "d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" (UID: "d2a8e5bc-9b7d-4556-87b2-e83602a39cd8"). InnerVolumeSpecName "kube-api-access-942vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.179583 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" (UID: "d2a8e5bc-9b7d-4556-87b2-e83602a39cd8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.187171 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.187579 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/983d045a-121a-40e1-9948-ecd9a1569e26-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.191234 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.194444 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.201132 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.203562 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-inventory" (OuterVolumeSpecName: "inventory") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.214964 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "983d045a-121a-40e1-9948-ecd9a1569e26" (UID: "983d045a-121a-40e1-9948-ecd9a1569e26"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.256943 4926 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.256978 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.256990 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257315 4926 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/983d045a-121a-40e1-9948-ecd9a1569e26-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257325 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k5bt\" (UniqueName: \"kubernetes.io/projected/983d045a-121a-40e1-9948-ecd9a1569e26-kube-api-access-8k5bt\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257334 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257343 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-942vf\" (UniqueName: \"kubernetes.io/projected/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-kube-api-access-942vf\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257351 4926 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257396 4926 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257407 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257417 4926 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.257427 4926 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/983d045a-121a-40e1-9948-ecd9a1569e26-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.472888 4926 generic.go:334] "Generic (PLEG): container finished" podID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" 
containerID="84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488" exitCode=0 Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.472976 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerDied","Data":"84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488"} Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.472986 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knw6v" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.473010 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knw6v" event={"ID":"d2a8e5bc-9b7d-4556-87b2-e83602a39cd8","Type":"ContainerDied","Data":"bbec7551ed0bb8a6df38df3dcdcc746b0e4fa86044663f7daf53df9ad6a275b3"} Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.473034 4926 scope.go:117] "RemoveContainer" containerID="84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.476672 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" event={"ID":"983d045a-121a-40e1-9948-ecd9a1569e26","Type":"ContainerDied","Data":"b57cde6e2de3c7dcd9327d4443dd78524f16339fcfb6edaf98fbdfd76843713f"} Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.476710 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b57cde6e2de3c7dcd9327d4443dd78524f16339fcfb6edaf98fbdfd76843713f" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.476759 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-w6frg" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.528981 4926 scope.go:117] "RemoveContainer" containerID="99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.542626 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knw6v"] Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.548521 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-knw6v"] Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.564050 4926 scope.go:117] "RemoveContainer" containerID="053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.609503 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs"] Nov 25 18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.610169 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="registry-server" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.610199 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="registry-server" Nov 25 18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.610234 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="extract-utilities" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.610248 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="extract-utilities" Nov 25 
18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.610297 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983d045a-121a-40e1-9948-ecd9a1569e26" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.610311 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="983d045a-121a-40e1-9948-ecd9a1569e26" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.610346 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="extract-content" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.610360 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="extract-content" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.610745 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="983d045a-121a-40e1-9948-ecd9a1569e26" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.610792 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" containerName="registry-server" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.612064 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.614505 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.614921 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.615086 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.615284 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-k2ctl" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.615836 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.621415 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs"] Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.665212 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.665284 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc 
kubenswrapper[4926]: I1125 18:59:10.665354 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k26t\" (UniqueName: \"kubernetes.io/projected/d752f8f9-0324-4383-9157-f1e23a46572b-kube-api-access-7k26t\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.665400 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.665430 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.665479 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.665524 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.670399 4926 scope.go:117] "RemoveContainer" containerID="84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488" Nov 25 18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.670742 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488\": container with ID starting with 84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488 not found: ID does not exist" containerID="84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.670770 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488"} err="failed to get container status \"84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488\": rpc error: code = NotFound desc = could not find container \"84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488\": container with ID starting with 84c655d987a1d9f9328218cc816c07961c9d5b164d3ae3728865685a6fc86488 not found: ID does not exist" Nov 25 18:59:10 crc 
kubenswrapper[4926]: I1125 18:59:10.670826 4926 scope.go:117] "RemoveContainer" containerID="99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169" Nov 25 18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.671049 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169\": container with ID starting with 99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169 not found: ID does not exist" containerID="99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.671072 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169"} err="failed to get container status \"99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169\": rpc error: code = NotFound desc = could not find container \"99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169\": container with ID starting with 99218f870cb10749279e7372798bdfb574eef845636fc051ac96ebc0abfb9169 not found: ID does not exist" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.671089 4926 scope.go:117] "RemoveContainer" containerID="053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9" Nov 25 18:59:10 crc kubenswrapper[4926]: E1125 18:59:10.671288 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9\": container with ID starting with 053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9 not found: ID does not exist" containerID="053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.671309 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9"} err="failed to get container status \"053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9\": rpc error: code = NotFound desc = could not find container \"053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9\": container with ID starting with 053624c910972376bfca22a6a8ae2193bf7324eea08d487e781aca28a28490d9 not found: ID does not exist" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767579 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767629 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767715 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: 
\"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767811 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k26t\" (UniqueName: \"kubernetes.io/projected/d752f8f9-0324-4383-9157-f1e23a46572b-kube-api-access-7k26t\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767842 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767871 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.767910 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.772610 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.772786 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.772833 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.773077 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.777050 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.777266 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:10 crc kubenswrapper[4926]: I1125 18:59:10.790327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k26t\" (UniqueName: \"kubernetes.io/projected/d752f8f9-0324-4383-9157-f1e23a46572b-kube-api-access-7k26t\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:11 crc kubenswrapper[4926]: I1125 18:59:11.010030 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 18:59:11 crc kubenswrapper[4926]: I1125 18:59:11.560520 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs"] Nov 25 18:59:12 crc kubenswrapper[4926]: I1125 18:59:12.345848 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2a8e5bc-9b7d-4556-87b2-e83602a39cd8" path="/var/lib/kubelet/pods/d2a8e5bc-9b7d-4556-87b2-e83602a39cd8/volumes" Nov 25 18:59:12 crc kubenswrapper[4926]: I1125 18:59:12.501489 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" event={"ID":"d752f8f9-0324-4383-9157-f1e23a46572b","Type":"ContainerStarted","Data":"545cd4565dd6ba85c0e8aa62750f25f27a95c2ac0b6617356410142890a04444"} Nov 25 18:59:12 crc kubenswrapper[4926]: I1125 18:59:12.501530 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" event={"ID":"d752f8f9-0324-4383-9157-f1e23a46572b","Type":"ContainerStarted","Data":"7a6727ae44ffa7b372a76946f92560e5cd10955e776357de718b15ac8f030f8b"} Nov 25 18:59:12 crc kubenswrapper[4926]: I1125 18:59:12.529331 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" podStartSLOduration=1.9963010620000001 podStartE2EDuration="2.529311385s" podCreationTimestamp="2025-11-25 18:59:10 +0000 UTC" firstStartedPulling="2025-11-25 18:59:11.564763984 +0000 UTC m=+2781.950277629" lastFinishedPulling="2025-11-25 18:59:12.097774307 +0000 UTC m=+2782.483287952" observedRunningTime="2025-11-25 18:59:12.52293769 +0000 UTC m=+2782.908451325" watchObservedRunningTime="2025-11-25 
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.163226 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"]
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.165148 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.168548 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.171794 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.182830 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"]
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.333273 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67528577-8a4c-4df6-b801-8a49064d4af6-config-volume\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.333338 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67528577-8a4c-4df6-b801-8a49064d4af6-secret-volume\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.333364 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfnvf\" (UniqueName: \"kubernetes.io/projected/67528577-8a4c-4df6-b801-8a49064d4af6-kube-api-access-qfnvf\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.435491 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67528577-8a4c-4df6-b801-8a49064d4af6-secret-volume\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.435599 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfnvf\" (UniqueName: \"kubernetes.io/projected/67528577-8a4c-4df6-b801-8a49064d4af6-kube-api-access-qfnvf\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"
Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.435856 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67528577-8a4c-4df6-b801-8a49064d4af6-config-volume\") pod \"collect-profiles-29401620-pvlml\" (UID:
\"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.438413 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67528577-8a4c-4df6-b801-8a49064d4af6-config-volume\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.444962 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67528577-8a4c-4df6-b801-8a49064d4af6-secret-volume\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.464974 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfnvf\" (UniqueName: \"kubernetes.io/projected/67528577-8a4c-4df6-b801-8a49064d4af6-kube-api-access-qfnvf\") pod \"collect-profiles-29401620-pvlml\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.490998 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:00 crc kubenswrapper[4926]: I1125 19:00:00.955983 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"] Nov 25 19:00:01 crc kubenswrapper[4926]: I1125 19:00:01.681957 4926 generic.go:334] "Generic (PLEG): container finished" podID="67528577-8a4c-4df6-b801-8a49064d4af6" containerID="0cf71e2e16575aaae2c8272d660b0d84dbb83e1a68c56feb53c72105a4ed938d" exitCode=0 Nov 25 19:00:01 crc kubenswrapper[4926]: I1125 19:00:01.682300 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" event={"ID":"67528577-8a4c-4df6-b801-8a49064d4af6","Type":"ContainerDied","Data":"0cf71e2e16575aaae2c8272d660b0d84dbb83e1a68c56feb53c72105a4ed938d"} Nov 25 19:00:01 crc kubenswrapper[4926]: I1125 19:00:01.682331 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" event={"ID":"67528577-8a4c-4df6-b801-8a49064d4af6","Type":"ContainerStarted","Data":"f12b120a47d9d2595dad38be61e205915896bf4b59e2e3632682d72636e23d35"} Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.052293 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.195893 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67528577-8a4c-4df6-b801-8a49064d4af6-config-volume\") pod \"67528577-8a4c-4df6-b801-8a49064d4af6\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.196463 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67528577-8a4c-4df6-b801-8a49064d4af6-secret-volume\") pod \"67528577-8a4c-4df6-b801-8a49064d4af6\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.196544 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfnvf\" (UniqueName: \"kubernetes.io/projected/67528577-8a4c-4df6-b801-8a49064d4af6-kube-api-access-qfnvf\") pod \"67528577-8a4c-4df6-b801-8a49064d4af6\" (UID: \"67528577-8a4c-4df6-b801-8a49064d4af6\") " Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.196865 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67528577-8a4c-4df6-b801-8a49064d4af6-config-volume" (OuterVolumeSpecName: "config-volume") pod "67528577-8a4c-4df6-b801-8a49064d4af6" (UID: "67528577-8a4c-4df6-b801-8a49064d4af6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.199190 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67528577-8a4c-4df6-b801-8a49064d4af6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.204207 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67528577-8a4c-4df6-b801-8a49064d4af6-kube-api-access-qfnvf" (OuterVolumeSpecName: "kube-api-access-qfnvf") pod "67528577-8a4c-4df6-b801-8a49064d4af6" (UID: "67528577-8a4c-4df6-b801-8a49064d4af6"). InnerVolumeSpecName "kube-api-access-qfnvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.204459 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67528577-8a4c-4df6-b801-8a49064d4af6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "67528577-8a4c-4df6-b801-8a49064d4af6" (UID: "67528577-8a4c-4df6-b801-8a49064d4af6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.302222 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67528577-8a4c-4df6-b801-8a49064d4af6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.302272 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfnvf\" (UniqueName: \"kubernetes.io/projected/67528577-8a4c-4df6-b801-8a49064d4af6-kube-api-access-qfnvf\") on node \"crc\" DevicePath \"\"" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.707855 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" event={"ID":"67528577-8a4c-4df6-b801-8a49064d4af6","Type":"ContainerDied","Data":"f12b120a47d9d2595dad38be61e205915896bf4b59e2e3632682d72636e23d35"} Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.707903 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f12b120a47d9d2595dad38be61e205915896bf4b59e2e3632682d72636e23d35" Nov 25 19:00:03 crc kubenswrapper[4926]: I1125 19:00:03.707921 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml" Nov 25 19:00:04 crc kubenswrapper[4926]: I1125 19:00:04.158751 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"] Nov 25 19:00:04 crc kubenswrapper[4926]: I1125 19:00:04.170029 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401575-pzfcs"] Nov 25 19:00:04 crc kubenswrapper[4926]: I1125 19:00:04.342746 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6560e66-aef6-4fd2-b808-4bdfaad6b992" path="/var/lib/kubelet/pods/b6560e66-aef6-4fd2-b808-4bdfaad6b992/volumes" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.496641 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8pf5x"] Nov 25 19:00:21 crc kubenswrapper[4926]: E1125 19:00:21.498041 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67528577-8a4c-4df6-b801-8a49064d4af6" containerName="collect-profiles" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.498066 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="67528577-8a4c-4df6-b801-8a49064d4af6" containerName="collect-profiles" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.498441 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="67528577-8a4c-4df6-b801-8a49064d4af6" containerName="collect-profiles" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.500406 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.509803 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8pf5x"] Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.557986 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-utilities\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.558053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-catalog-content\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.558206 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24gr5\" (UniqueName: \"kubernetes.io/projected/8e05e22d-df96-49cf-b609-1f70009a0922-kube-api-access-24gr5\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.660520 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-utilities\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.660575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-catalog-content\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.660643 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24gr5\" (UniqueName: \"kubernetes.io/projected/8e05e22d-df96-49cf-b609-1f70009a0922-kube-api-access-24gr5\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.661146 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-utilities\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.661180 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-catalog-content\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.688275 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-24gr5\" (UniqueName: \"kubernetes.io/projected/8e05e22d-df96-49cf-b609-1f70009a0922-kube-api-access-24gr5\") pod \"community-operators-8pf5x\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:21 crc kubenswrapper[4926]: I1125 19:00:21.863833 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:22 crc kubenswrapper[4926]: I1125 19:00:22.414026 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8pf5x"] Nov 25 19:00:22 crc kubenswrapper[4926]: I1125 19:00:22.622738 4926 scope.go:117] "RemoveContainer" containerID="2361a082ccf88b5a14c2b5d5ab11d7acf571ffa5267e3ae4f5325856560693ab" Nov 25 19:00:22 crc kubenswrapper[4926]: I1125 19:00:22.968172 4926 generic.go:334] "Generic (PLEG): container finished" podID="8e05e22d-df96-49cf-b609-1f70009a0922" containerID="84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b" exitCode=0 Nov 25 19:00:22 crc kubenswrapper[4926]: I1125 19:00:22.968337 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerDied","Data":"84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b"} Nov 25 19:00:22 crc kubenswrapper[4926]: I1125 19:00:22.968487 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerStarted","Data":"063e2a8e03e177d4e3fcd63c4074cae7cdbda2470829850fe71f85634bdefe7e"} Nov 25 19:00:23 crc kubenswrapper[4926]: I1125 19:00:23.978320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerStarted","Data":"0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2"} Nov 25 19:00:25 crc kubenswrapper[4926]: I1125 19:00:25.998090 4926 generic.go:334] "Generic (PLEG): container finished" podID="8e05e22d-df96-49cf-b609-1f70009a0922" containerID="0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2" exitCode=0 Nov 25 19:00:25 crc kubenswrapper[4926]: I1125 19:00:25.998184 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerDied","Data":"0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2"} Nov 25 19:00:27 crc kubenswrapper[4926]: I1125 19:00:27.016721 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerStarted","Data":"647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8"} Nov 25 19:00:27 crc kubenswrapper[4926]: I1125 19:00:27.043644 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8pf5x" podStartSLOduration=2.599915991 podStartE2EDuration="6.043626881s" podCreationTimestamp="2025-11-25 19:00:21 +0000 UTC" firstStartedPulling="2025-11-25 19:00:22.970506712 +0000 UTC m=+2853.356020327" lastFinishedPulling="2025-11-25 19:00:26.414217612 +0000 UTC m=+2856.799731217" observedRunningTime="2025-11-25 19:00:27.036533095 +0000 UTC m=+2857.422046700" 
Nov 25 19:00:31 crc kubenswrapper[4926]: I1125 19:00:31.863937 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8pf5x"
Nov 25 19:00:31 crc kubenswrapper[4926]: I1125 19:00:31.864659 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8pf5x"
Nov 25 19:00:31 crc kubenswrapper[4926]: I1125 19:00:31.941288 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8pf5x"
Nov 25 19:00:32 crc kubenswrapper[4926]: I1125 19:00:32.135584 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8pf5x"
Nov 25 19:00:33 crc kubenswrapper[4926]: I1125 19:00:33.541109 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:00:33 crc kubenswrapper[4926]: I1125 19:00:33.541421 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:00:33 crc kubenswrapper[4926]: I1125 19:00:33.859900 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8pf5x"]
Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.122625 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8pf5x" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="registry-server" containerID="cri-o://647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8" gracePeriod=2
Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.603075 4926 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.758073 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24gr5\" (UniqueName: \"kubernetes.io/projected/8e05e22d-df96-49cf-b609-1f70009a0922-kube-api-access-24gr5\") pod \"8e05e22d-df96-49cf-b609-1f70009a0922\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.758212 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-catalog-content\") pod \"8e05e22d-df96-49cf-b609-1f70009a0922\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.758284 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-utilities\") pod \"8e05e22d-df96-49cf-b609-1f70009a0922\" (UID: \"8e05e22d-df96-49cf-b609-1f70009a0922\") " Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.758903 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-utilities" (OuterVolumeSpecName: "utilities") pod "8e05e22d-df96-49cf-b609-1f70009a0922" (UID: "8e05e22d-df96-49cf-b609-1f70009a0922"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.768748 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e05e22d-df96-49cf-b609-1f70009a0922-kube-api-access-24gr5" (OuterVolumeSpecName: "kube-api-access-24gr5") pod "8e05e22d-df96-49cf-b609-1f70009a0922" (UID: "8e05e22d-df96-49cf-b609-1f70009a0922"). InnerVolumeSpecName "kube-api-access-24gr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.811575 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e05e22d-df96-49cf-b609-1f70009a0922" (UID: "8e05e22d-df96-49cf-b609-1f70009a0922"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.860358 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.860417 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e05e22d-df96-49cf-b609-1f70009a0922-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:00:35 crc kubenswrapper[4926]: I1125 19:00:35.860430 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24gr5\" (UniqueName: \"kubernetes.io/projected/8e05e22d-df96-49cf-b609-1f70009a0922-kube-api-access-24gr5\") on node \"crc\" DevicePath \"\"" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.136892 4926 generic.go:334] "Generic (PLEG): container finished" podID="8e05e22d-df96-49cf-b609-1f70009a0922" containerID="647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8" exitCode=0 Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.136933 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerDied","Data":"647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8"} Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.136962 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8pf5x" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.136976 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8pf5x" event={"ID":"8e05e22d-df96-49cf-b609-1f70009a0922","Type":"ContainerDied","Data":"063e2a8e03e177d4e3fcd63c4074cae7cdbda2470829850fe71f85634bdefe7e"} Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.137001 4926 scope.go:117] "RemoveContainer" containerID="647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.177423 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8pf5x"] Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.179383 4926 scope.go:117] "RemoveContainer" containerID="0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.187213 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8pf5x"] Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.216477 4926 scope.go:117] "RemoveContainer" containerID="84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.271112 4926 scope.go:117] "RemoveContainer" containerID="647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8" Nov 25 19:00:36 crc kubenswrapper[4926]: E1125 19:00:36.271566 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8\": container with ID starting with 647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8 not found: ID does not exist" containerID="647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.271597 
4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8"} err="failed to get container status \"647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8\": rpc error: code = NotFound desc = could not find container \"647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8\": container with ID starting with 647f0d9f86f598e66ab135957def7c1f433374e5009f3568eb6aaebaf11d59f8 not found: ID does not exist" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.271617 4926 scope.go:117] "RemoveContainer" containerID="0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2" Nov 25 19:00:36 crc kubenswrapper[4926]: E1125 19:00:36.271862 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2\": container with ID starting with 0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2 not found: ID does not exist" containerID="0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.271889 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2"} err="failed to get container status \"0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2\": rpc error: code = NotFound desc = could not find container \"0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2\": container with ID starting with 0aba55704a2b269959a81e7c05e9b78efbf89c40a4a8d54f5eaebefc4fa505a2 not found: ID does not exist" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.271911 4926 scope.go:117] "RemoveContainer" containerID="84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b" Nov 25 19:00:36 crc kubenswrapper[4926]: E1125 19:00:36.272140 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b\": container with ID starting with 84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b not found: ID does not exist" containerID="84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.272165 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b"} err="failed to get container status \"84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b\": rpc error: code = NotFound desc = could not find container \"84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b\": container with ID starting with 84e850cbb1a53a7303c0db680a57c27b60e94ddcbefa8d0f2be5ba7ef1c5a80b not found: ID does not exist" Nov 25 19:00:36 crc kubenswrapper[4926]: I1125 19:00:36.344286 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" path="/var/lib/kubelet/pods/8e05e22d-df96-49cf-b609-1f70009a0922/volumes" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.162051 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401621-zm5n9"] Nov 25 19:01:00 crc kubenswrapper[4926]: E1125 19:01:00.163100 4926 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="registry-server" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.163118 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="registry-server" Nov 25 19:01:00 crc kubenswrapper[4926]: E1125 19:01:00.163169 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="extract-content" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.163177 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="extract-content" Nov 25 19:01:00 crc kubenswrapper[4926]: E1125 19:01:00.163195 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="extract-utilities" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.163203 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="extract-utilities" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.163451 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e05e22d-df96-49cf-b609-1f70009a0922" containerName="registry-server" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.164274 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.178641 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401621-zm5n9"] Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.312989 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-combined-ca-bundle\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.313433 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-config-data\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.313671 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-fernet-keys\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.313893 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5k6n\" (UniqueName: \"kubernetes.io/projected/e2a52929-6ede-453c-a04b-cbe357ca6476-kube-api-access-q5k6n\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.416140 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-fernet-keys\") pod \"keystone-cron-29401621-zm5n9\" (UID: 
\"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.416219 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5k6n\" (UniqueName: \"kubernetes.io/projected/e2a52929-6ede-453c-a04b-cbe357ca6476-kube-api-access-q5k6n\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.416327 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-combined-ca-bundle\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.416365 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-config-data\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.424448 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-combined-ca-bundle\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.428651 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-fernet-keys\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.432530 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-config-data\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.441682 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5k6n\" (UniqueName: \"kubernetes.io/projected/e2a52929-6ede-453c-a04b-cbe357ca6476-kube-api-access-q5k6n\") pod \"keystone-cron-29401621-zm5n9\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:00 crc kubenswrapper[4926]: I1125 19:01:00.493124 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:01 crc kubenswrapper[4926]: I1125 19:01:01.027949 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401621-zm5n9"] Nov 25 19:01:01 crc kubenswrapper[4926]: I1125 19:01:01.427605 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401621-zm5n9" event={"ID":"e2a52929-6ede-453c-a04b-cbe357ca6476","Type":"ContainerStarted","Data":"ab6e1f52afd2a2952a6e9b5365c46439179678d9893f22a8f872f772d3bdb3b4"} Nov 25 19:01:01 crc kubenswrapper[4926]: I1125 19:01:01.427955 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401621-zm5n9" event={"ID":"e2a52929-6ede-453c-a04b-cbe357ca6476","Type":"ContainerStarted","Data":"312b392034bd988536dba5b9592ef0773e5b95550e11596f86f84b9e575ed4d3"} Nov 25 19:01:01 crc kubenswrapper[4926]: I1125 19:01:01.454497 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401621-zm5n9" podStartSLOduration=1.454475502 podStartE2EDuration="1.454475502s" podCreationTimestamp="2025-11-25 19:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:01:01.450851863 +0000 UTC m=+2891.836365508" watchObservedRunningTime="2025-11-25 19:01:01.454475502 +0000 UTC m=+2891.839989117" Nov 25 19:01:03 crc kubenswrapper[4926]: I1125 19:01:03.541734 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:01:03 crc kubenswrapper[4926]: I1125 19:01:03.542003 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:01:04 crc kubenswrapper[4926]: I1125 19:01:04.468253 4926 generic.go:334] "Generic (PLEG): container finished" podID="e2a52929-6ede-453c-a04b-cbe357ca6476" containerID="ab6e1f52afd2a2952a6e9b5365c46439179678d9893f22a8f872f772d3bdb3b4" exitCode=0 Nov 25 19:01:04 crc kubenswrapper[4926]: I1125 19:01:04.468303 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401621-zm5n9" event={"ID":"e2a52929-6ede-453c-a04b-cbe357ca6476","Type":"ContainerDied","Data":"ab6e1f52afd2a2952a6e9b5365c46439179678d9893f22a8f872f772d3bdb3b4"} Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.879364 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.987080 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-combined-ca-bundle\") pod \"e2a52929-6ede-453c-a04b-cbe357ca6476\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.987328 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5k6n\" (UniqueName: \"kubernetes.io/projected/e2a52929-6ede-453c-a04b-cbe357ca6476-kube-api-access-q5k6n\") pod \"e2a52929-6ede-453c-a04b-cbe357ca6476\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.987354 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-config-data\") pod \"e2a52929-6ede-453c-a04b-cbe357ca6476\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.987393 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-fernet-keys\") pod \"e2a52929-6ede-453c-a04b-cbe357ca6476\" (UID: \"e2a52929-6ede-453c-a04b-cbe357ca6476\") " Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.992919 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2a52929-6ede-453c-a04b-cbe357ca6476-kube-api-access-q5k6n" (OuterVolumeSpecName: "kube-api-access-q5k6n") pod "e2a52929-6ede-453c-a04b-cbe357ca6476" (UID: "e2a52929-6ede-453c-a04b-cbe357ca6476"). InnerVolumeSpecName "kube-api-access-q5k6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:01:05 crc kubenswrapper[4926]: I1125 19:01:05.995111 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e2a52929-6ede-453c-a04b-cbe357ca6476" (UID: "e2a52929-6ede-453c-a04b-cbe357ca6476"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.033470 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2a52929-6ede-453c-a04b-cbe357ca6476" (UID: "e2a52929-6ede-453c-a04b-cbe357ca6476"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.047456 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-config-data" (OuterVolumeSpecName: "config-data") pod "e2a52929-6ede-453c-a04b-cbe357ca6476" (UID: "e2a52929-6ede-453c-a04b-cbe357ca6476"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.089543 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.089679 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5k6n\" (UniqueName: \"kubernetes.io/projected/e2a52929-6ede-453c-a04b-cbe357ca6476-kube-api-access-q5k6n\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.089758 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.089842 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2a52929-6ede-453c-a04b-cbe357ca6476-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.487805 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401621-zm5n9" event={"ID":"e2a52929-6ede-453c-a04b-cbe357ca6476","Type":"ContainerDied","Data":"312b392034bd988536dba5b9592ef0773e5b95550e11596f86f84b9e575ed4d3"} Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.487872 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="312b392034bd988536dba5b9592ef0773e5b95550e11596f86f84b9e575ed4d3" Nov 25 19:01:06 crc kubenswrapper[4926]: I1125 19:01:06.487977 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401621-zm5n9" Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.541929 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.542559 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.542622 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.543468 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.543538 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" 
containerID="cri-o://9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" gracePeriod=600 Nov 25 19:01:33 crc kubenswrapper[4926]: E1125 19:01:33.692901 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.784649 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" exitCode=0 Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.784701 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"} Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.784744 4926 scope.go:117] "RemoveContainer" containerID="0a9fba914534656894c1354afeb1abc1746a5853600a27b6b9c61bb4920391f4" Nov 25 19:01:33 crc kubenswrapper[4926]: I1125 19:01:33.785510 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:01:33 crc kubenswrapper[4926]: E1125 19:01:33.785809 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:01:46 crc kubenswrapper[4926]: I1125 19:01:46.329865 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:01:46 crc kubenswrapper[4926]: E1125 19:01:46.330888 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:01:49 crc kubenswrapper[4926]: I1125 19:01:49.967947 4926 generic.go:334] "Generic (PLEG): container finished" podID="d752f8f9-0324-4383-9157-f1e23a46572b" containerID="545cd4565dd6ba85c0e8aa62750f25f27a95c2ac0b6617356410142890a04444" exitCode=0 Nov 25 19:01:49 crc kubenswrapper[4926]: I1125 19:01:49.968095 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" event={"ID":"d752f8f9-0324-4383-9157-f1e23a46572b","Type":"ContainerDied","Data":"545cd4565dd6ba85c0e8aa62750f25f27a95c2ac0b6617356410142890a04444"} Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.433912 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.562857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ssh-key\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.563251 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-1\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.563401 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-2\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.563619 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-inventory\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.563784 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k26t\" (UniqueName: \"kubernetes.io/projected/d752f8f9-0324-4383-9157-f1e23a46572b-kube-api-access-7k26t\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.563914 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-0\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.564075 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-telemetry-combined-ca-bundle\") pod \"d752f8f9-0324-4383-9157-f1e23a46572b\" (UID: \"d752f8f9-0324-4383-9157-f1e23a46572b\") " Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.571430 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.574586 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d752f8f9-0324-4383-9157-f1e23a46572b-kube-api-access-7k26t" (OuterVolumeSpecName: "kube-api-access-7k26t") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). 
InnerVolumeSpecName "kube-api-access-7k26t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.600883 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.613909 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.614719 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.619349 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-inventory" (OuterVolumeSpecName: "inventory") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.620325 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d752f8f9-0324-4383-9157-f1e23a46572b" (UID: "d752f8f9-0324-4383-9157-f1e23a46572b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.666993 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k26t\" (UniqueName: \"kubernetes.io/projected/d752f8f9-0324-4383-9157-f1e23a46572b-kube-api-access-7k26t\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.667031 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.667045 4926 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.667057 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.667066 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.667075 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.667084 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d752f8f9-0324-4383-9157-f1e23a46572b-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.992227 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" event={"ID":"d752f8f9-0324-4383-9157-f1e23a46572b","Type":"ContainerDied","Data":"7a6727ae44ffa7b372a76946f92560e5cd10955e776357de718b15ac8f030f8b"} Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.992269 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a6727ae44ffa7b372a76946f92560e5cd10955e776357de718b15ac8f030f8b" Nov 25 19:01:51 crc kubenswrapper[4926]: I1125 19:01:51.992344 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs" Nov 25 19:01:58 crc kubenswrapper[4926]: I1125 19:01:58.330024 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:01:58 crc kubenswrapper[4926]: E1125 19:01:58.331052 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:02:13 crc kubenswrapper[4926]: I1125 19:02:13.329225 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:02:13 crc kubenswrapper[4926]: E1125 19:02:13.331878 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.835290 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bkrfz"] Nov 25 19:02:15 crc kubenswrapper[4926]: E1125 19:02:15.836100 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d752f8f9-0324-4383-9157-f1e23a46572b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.836117 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d752f8f9-0324-4383-9157-f1e23a46572b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 19:02:15 crc kubenswrapper[4926]: E1125 19:02:15.836165 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2a52929-6ede-453c-a04b-cbe357ca6476" containerName="keystone-cron" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.836173 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a52929-6ede-453c-a04b-cbe357ca6476" containerName="keystone-cron" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.836415 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d752f8f9-0324-4383-9157-f1e23a46572b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.836438 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a52929-6ede-453c-a04b-cbe357ca6476" containerName="keystone-cron" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.838143 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:15 crc kubenswrapper[4926]: I1125 19:02:15.847081 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkrfz"] Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.017445 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-catalog-content\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.017876 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jkvk\" (UniqueName: \"kubernetes.io/projected/ddb380a1-7170-49d0-b587-c6abd8d73cbc-kube-api-access-7jkvk\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.018022 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-utilities\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.119770 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-catalog-content\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.119880 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jkvk\" (UniqueName: \"kubernetes.io/projected/ddb380a1-7170-49d0-b587-c6abd8d73cbc-kube-api-access-7jkvk\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.119917 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-utilities\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.120551 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-utilities\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.120750 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-catalog-content\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.142395 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7jkvk\" (UniqueName: \"kubernetes.io/projected/ddb380a1-7170-49d0-b587-c6abd8d73cbc-kube-api-access-7jkvk\") pod \"redhat-operators-bkrfz\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") " pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.186051 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:16 crc kubenswrapper[4926]: I1125 19:02:16.698771 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkrfz"] Nov 25 19:02:17 crc kubenswrapper[4926]: I1125 19:02:17.328595 4926 generic.go:334] "Generic (PLEG): container finished" podID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerID="63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305" exitCode=0 Nov 25 19:02:17 crc kubenswrapper[4926]: I1125 19:02:17.328721 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerDied","Data":"63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305"} Nov 25 19:02:17 crc kubenswrapper[4926]: I1125 19:02:17.328813 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerStarted","Data":"6cdd9c95308e1fb8f0af81fbfb8129e92d6c43022740d3f7e84b418f3c4be442"} Nov 25 19:02:17 crc kubenswrapper[4926]: I1125 19:02:17.330358 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 19:02:18 crc kubenswrapper[4926]: I1125 19:02:18.344146 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerStarted","Data":"cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f"} Nov 25 19:02:19 crc kubenswrapper[4926]: I1125 19:02:19.356746 4926 generic.go:334] "Generic (PLEG): container finished" podID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerID="cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f" exitCode=0 Nov 25 19:02:19 crc kubenswrapper[4926]: I1125 19:02:19.356831 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerDied","Data":"cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f"} Nov 25 19:02:20 crc kubenswrapper[4926]: I1125 19:02:20.375973 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerStarted","Data":"61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f"} Nov 25 19:02:20 crc kubenswrapper[4926]: I1125 19:02:20.393818 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bkrfz" podStartSLOduration=2.942839706 podStartE2EDuration="5.393801029s" podCreationTimestamp="2025-11-25 19:02:15 +0000 UTC" firstStartedPulling="2025-11-25 19:02:17.330183817 +0000 UTC m=+2967.715697422" lastFinishedPulling="2025-11-25 19:02:19.78114511 +0000 UTC m=+2970.166658745" observedRunningTime="2025-11-25 19:02:20.39055876 +0000 UTC m=+2970.776072385" watchObservedRunningTime="2025-11-25 19:02:20.393801029 +0000 UTC m=+2970.779314634" Nov 25 19:02:26 crc 
kubenswrapper[4926]: I1125 19:02:26.187329 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:26 crc kubenswrapper[4926]: I1125 19:02:26.188076 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bkrfz" Nov 25 19:02:27 crc kubenswrapper[4926]: I1125 19:02:27.244316 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bkrfz" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" probeResult="failure" output=< Nov 25 19:02:27 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 19:02:27 crc kubenswrapper[4926]: > Nov 25 19:02:27 crc kubenswrapper[4926]: I1125 19:02:27.330208 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:02:27 crc kubenswrapper[4926]: E1125 19:02:27.330879 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.528808 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.530744 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.533018 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.547279 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.649917 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.656771 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.659803 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-config-data" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.667890 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.675577 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.677412 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.679542 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-2-config-data" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.683300 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.722538 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-dev\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.722587 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-config-data\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.722606 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-config-data-custom\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.722786 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-scripts\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.722923 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723012 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctbg4\" (UniqueName: \"kubernetes.io/projected/7d6bbb74-3796-44c9-a153-84fd8de6f338-kube-api-access-ctbg4\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723188 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-run\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723265 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723330 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-lib-modules\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723440 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723478 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723596 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-nvme\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723655 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-sys\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.723708 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825005 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825083 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: 
\"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zggv\" (UniqueName: \"kubernetes.io/projected/7a824532-6df2-4a8d-a6ae-1859686e6bb5-kube-api-access-2zggv\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825119 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825134 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825158 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825186 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-nvme\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825210 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-sys\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825253 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825268 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-dev\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825286 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-config-data\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825301 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-config-data-custom\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825315 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825331 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825348 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825365 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825399 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825414 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825435 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-scripts\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825454 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: 
\"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825473 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825494 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825510 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825528 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825556 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz6fs\" (UniqueName: \"kubernetes.io/projected/899c3fbb-eb5b-46b6-b535-27d400c4b40e-kube-api-access-rz6fs\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825576 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctbg4\" (UniqueName: \"kubernetes.io/projected/7d6bbb74-3796-44c9-a153-84fd8de6f338-kube-api-access-ctbg4\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825592 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825612 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825627 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " 
pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825644 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825660 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825673 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-run\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825693 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825707 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825720 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-sys\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825741 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825759 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-run\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825774 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825795 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-dev\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825817 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825836 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825855 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-lib-modules\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.825882 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.826036 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-nvme\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.826285 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-sys\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.826318 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.826347 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-dev\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.826927 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0" Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.827025 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.827095 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.827187 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-lib-modules\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.827257 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.827204 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7d6bbb74-3796-44c9-a153-84fd8de6f338-run\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.830555 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.831062 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-config-data-custom\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.833033 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-scripts\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.836915 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d6bbb74-3796-44c9-a153-84fd8de6f338-config-data\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.844707 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctbg4\" (UniqueName: \"kubernetes.io/projected/7d6bbb74-3796-44c9-a153-84fd8de6f338-kube-api-access-ctbg4\") pod \"cinder-backup-0\" (UID: \"7d6bbb74-3796-44c9-a153-84fd8de6f338\") " pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.858238 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927569 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz6fs\" (UniqueName: \"kubernetes.io/projected/899c3fbb-eb5b-46b6-b535-27d400c4b40e-kube-api-access-rz6fs\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927669 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927694 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927720 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-run\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927766 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927789 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927812 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-sys\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927843 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927901 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-dev\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927936 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.927972 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928021 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zggv\" (UniqueName: \"kubernetes.io/projected/7a824532-6df2-4a8d-a6ae-1859686e6bb5-kube-api-access-2zggv\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928067 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928088 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928120 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928189 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928219 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928240 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928265 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928296 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928325 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928344 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928398 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928424 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928452 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928477 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.928612 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930405 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930469 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930560 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930573 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930615 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930618 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-sys\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.930655 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931153 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931665 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931736 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931739 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/7a824532-6df2-4a8d-a6ae-1859686e6bb5-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931782 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-run\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.931792 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.933958 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.934018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-dev\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.934062 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/899c3fbb-eb5b-46b6-b535-27d400c4b40e-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.935258 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.938922 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.939287 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.939290 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.944746 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.945319 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz6fs\" (UniqueName: \"kubernetes.io/projected/899c3fbb-eb5b-46b6-b535-27d400c4b40e-kube-api-access-rz6fs\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.947799 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.950242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/899c3fbb-eb5b-46b6-b535-27d400c4b40e-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"899c3fbb-eb5b-46b6-b535-27d400c4b40e\") " pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.950824 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a824532-6df2-4a8d-a6ae-1859686e6bb5-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.953815 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zggv\" (UniqueName: \"kubernetes.io/projected/7a824532-6df2-4a8d-a6ae-1859686e6bb5-kube-api-access-2zggv\") pod \"cinder-volume-nfs-2-0\" (UID: \"7a824532-6df2-4a8d-a6ae-1859686e6bb5\") " pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:28 crc kubenswrapper[4926]: I1125 19:02:28.975093 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:29 crc kubenswrapper[4926]: I1125 19:02:29.000858 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:29 crc kubenswrapper[4926]: I1125 19:02:29.450189 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"]
Nov 25 19:02:29 crc kubenswrapper[4926]: I1125 19:02:29.469878 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7d6bbb74-3796-44c9-a153-84fd8de6f338","Type":"ContainerStarted","Data":"6b45a9d3c3a699645934f1ccc262420e313ea6cff24405b04114f3a6a20bfcfe"}
Nov 25 19:02:29 crc kubenswrapper[4926]: I1125 19:02:29.601429 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"]
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.194985 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"]
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.491287 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7d6bbb74-3796-44c9-a153-84fd8de6f338","Type":"ContainerStarted","Data":"2c18047ce6ede8eb15a3f763a7b5b86b8fc80996ed6d4f31d96d342d20594e74"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.491845 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7d6bbb74-3796-44c9-a153-84fd8de6f338","Type":"ContainerStarted","Data":"cd8e934b1c32c1d795367e8500d9af927d7b7f461a9c65499d156a46364ddfda"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.493205 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"899c3fbb-eb5b-46b6-b535-27d400c4b40e","Type":"ContainerStarted","Data":"4e9abf6ec2d5c003fb5d4992bafa57c4f0331ac2421329cb1146be37b2c5ffdc"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.493241 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"899c3fbb-eb5b-46b6-b535-27d400c4b40e","Type":"ContainerStarted","Data":"cfe9e3a1654211675d1d5138eeac0bba4de679c1f8a8cb8d3d06eaa62fdaa609"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.495234 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"7a824532-6df2-4a8d-a6ae-1859686e6bb5","Type":"ContainerStarted","Data":"a891f53a967bf478a2f1ed93fcc1a08c75844e02bdd9ceeb4716950e1abba329"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.495264 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"7a824532-6df2-4a8d-a6ae-1859686e6bb5","Type":"ContainerStarted","Data":"6222abf7d6eadc2029c22566b1527522e45e4346b4cc1b3a71e3ad27c3937f82"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.495273 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"7a824532-6df2-4a8d-a6ae-1859686e6bb5","Type":"ContainerStarted","Data":"44d23fda883147400d72c9c62f7a740c23e6ab9d5a5097ebd6ec3a58f7e39b14"}
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.516767 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.336699609 podStartE2EDuration="2.516745068s" podCreationTimestamp="2025-11-25 19:02:28 +0000 UTC" firstStartedPulling="2025-11-25 19:02:29.445476747 +0000 UTC m=+2979.830990352" lastFinishedPulling="2025-11-25 19:02:29.625522196 +0000 UTC m=+2980.011035811" observedRunningTime="2025-11-25 19:02:30.511462663 +0000 UTC m=+2980.896976268" watchObservedRunningTime="2025-11-25 19:02:30.516745068 +0000 UTC m=+2980.902258683"
Nov 25 19:02:30 crc kubenswrapper[4926]: I1125 19:02:30.546680 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-2-0" podStartSLOduration=2.346867567 podStartE2EDuration="2.546658208s" podCreationTimestamp="2025-11-25 19:02:28 +0000 UTC" firstStartedPulling="2025-11-25 19:02:29.623016078 +0000 UTC m=+2980.008529683" lastFinishedPulling="2025-11-25 19:02:29.822806709 +0000 UTC m=+2980.208320324" observedRunningTime="2025-11-25 19:02:30.540979682 +0000 UTC m=+2980.926493297" watchObservedRunningTime="2025-11-25 19:02:30.546658208 +0000 UTC m=+2980.932171813"
Nov 25 19:02:31 crc kubenswrapper[4926]: I1125 19:02:31.521671 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"899c3fbb-eb5b-46b6-b535-27d400c4b40e","Type":"ContainerStarted","Data":"568d5467004132ffc59588077913128028e024abbf38077f5224c7993cdba694"}
Nov 25 19:02:31 crc kubenswrapper[4926]: I1125 19:02:31.562932 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-0" podStartSLOduration=3.56291117 podStartE2EDuration="3.56291117s" podCreationTimestamp="2025-11-25 19:02:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:02:31.555666281 +0000 UTC m=+2981.941179906" watchObservedRunningTime="2025-11-25 19:02:31.56291117 +0000 UTC m=+2981.948424775"
Nov 25 19:02:33 crc kubenswrapper[4926]: I1125 19:02:33.859531 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0"
Nov 25 19:02:33 crc kubenswrapper[4926]: I1125 19:02:33.976123 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.001251 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.851139 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fkz8w"]
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.856493 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.912477 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-utilities\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.912527 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-catalog-content\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.912766 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqrqc\" (UniqueName: \"kubernetes.io/projected/275b05f8-9361-4860-9b51-daa4426ba67a-kube-api-access-pqrqc\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:34 crc kubenswrapper[4926]: I1125 19:02:34.924741 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fkz8w"]
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.014518 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-catalog-content\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.014719 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqrqc\" (UniqueName: \"kubernetes.io/projected/275b05f8-9361-4860-9b51-daa4426ba67a-kube-api-access-pqrqc\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.014825 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-utilities\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.015071 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-catalog-content\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.015289 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-utilities\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.043256 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqrqc\" (UniqueName: \"kubernetes.io/projected/275b05f8-9361-4860-9b51-daa4426ba67a-kube-api-access-pqrqc\") pod \"certified-operators-fkz8w\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") " pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.192767 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:35 crc kubenswrapper[4926]: I1125 19:02:35.630064 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fkz8w"]
Nov 25 19:02:36 crc kubenswrapper[4926]: I1125 19:02:36.589264 4926 generic.go:334] "Generic (PLEG): container finished" podID="275b05f8-9361-4860-9b51-daa4426ba67a" containerID="77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c" exitCode=0
Nov 25 19:02:36 crc kubenswrapper[4926]: I1125 19:02:36.589567 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerDied","Data":"77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c"}
Nov 25 19:02:36 crc kubenswrapper[4926]: I1125 19:02:36.590630 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerStarted","Data":"03e95ebc41a5b655943abb16d42184c37edd2e0cec9ef9d8606217e1511bc7cc"}
Nov 25 19:02:37 crc kubenswrapper[4926]: I1125 19:02:37.231543 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bkrfz" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" probeResult="failure" output=<
Nov 25 19:02:37 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 25 19:02:37 crc kubenswrapper[4926]: >
Nov 25 19:02:37 crc kubenswrapper[4926]: I1125 19:02:37.614772 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerStarted","Data":"3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c"}
Nov 25 19:02:39 crc kubenswrapper[4926]: I1125 19:02:39.032767 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 25 19:02:39 crc kubenswrapper[4926]: I1125 19:02:39.610685 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:02:39 crc kubenswrapper[4926]: I1125 19:02:39.612770 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-0"
Nov 25 19:02:39 crc kubenswrapper[4926]: I1125 19:02:39.646198 4926 generic.go:334] "Generic (PLEG): container finished" podID="275b05f8-9361-4860-9b51-daa4426ba67a" containerID="3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c" exitCode=0
Nov 25 19:02:39 crc kubenswrapper[4926]: I1125 19:02:39.646235 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerDied","Data":"3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c"}
Nov 25 19:02:40 crc kubenswrapper[4926]: I1125 19:02:40.335445 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:02:40 crc kubenswrapper[4926]: E1125 19:02:40.338500 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:02:40 crc kubenswrapper[4926]: I1125 19:02:40.657916 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerStarted","Data":"14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3"}
Nov 25 19:02:40 crc kubenswrapper[4926]: I1125 19:02:40.684829 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fkz8w" podStartSLOduration=3.204926251 podStartE2EDuration="6.684811744s" podCreationTimestamp="2025-11-25 19:02:34 +0000 UTC" firstStartedPulling="2025-11-25 19:02:36.600875199 +0000 UTC m=+2986.986388804" lastFinishedPulling="2025-11-25 19:02:40.080760692 +0000 UTC m=+2990.466274297" observedRunningTime="2025-11-25 19:02:40.679814077 +0000 UTC m=+2991.065327692" watchObservedRunningTime="2025-11-25 19:02:40.684811744 +0000 UTC m=+2991.070325349"
Nov 25 19:02:45 crc kubenswrapper[4926]: I1125 19:02:45.192858 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:45 crc kubenswrapper[4926]: I1125 19:02:45.193620 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:46 crc kubenswrapper[4926]: I1125 19:02:46.255341 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-fkz8w" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="registry-server" probeResult="failure" output=<
Nov 25 19:02:46 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 25 19:02:46 crc kubenswrapper[4926]: >
Nov 25 19:02:47 crc kubenswrapper[4926]: I1125 19:02:47.258670 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bkrfz" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" probeResult="failure" output=<
Nov 25 19:02:47 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 25 19:02:47 crc kubenswrapper[4926]: >
Nov 25 19:02:52 crc kubenswrapper[4926]: I1125 19:02:52.329528 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:02:52 crc kubenswrapper[4926]: E1125 19:02:52.330296 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:02:55 crc kubenswrapper[4926]: I1125 19:02:55.250983 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:55 crc kubenswrapper[4926]: I1125 19:02:55.301121 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:55 crc kubenswrapper[4926]: I1125 19:02:55.492615 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fkz8w"]
Nov 25 19:02:56 crc kubenswrapper[4926]: I1125 19:02:56.268395 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bkrfz"
Nov 25 19:02:56 crc kubenswrapper[4926]: I1125 19:02:56.352722 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bkrfz"
Nov 25 19:02:56 crc kubenswrapper[4926]: I1125 19:02:56.838637 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fkz8w" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="registry-server" containerID="cri-o://14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3" gracePeriod=2
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.411595 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.560282 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-catalog-content\") pod \"275b05f8-9361-4860-9b51-daa4426ba67a\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") "
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.560751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-utilities\") pod \"275b05f8-9361-4860-9b51-daa4426ba67a\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") "
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.561744 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-utilities" (OuterVolumeSpecName: "utilities") pod "275b05f8-9361-4860-9b51-daa4426ba67a" (UID: "275b05f8-9361-4860-9b51-daa4426ba67a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.561857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqrqc\" (UniqueName: \"kubernetes.io/projected/275b05f8-9361-4860-9b51-daa4426ba67a-kube-api-access-pqrqc\") pod \"275b05f8-9361-4860-9b51-daa4426ba67a\" (UID: \"275b05f8-9361-4860-9b51-daa4426ba67a\") "
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.563618 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.568458 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/275b05f8-9361-4860-9b51-daa4426ba67a-kube-api-access-pqrqc" (OuterVolumeSpecName: "kube-api-access-pqrqc") pod "275b05f8-9361-4860-9b51-daa4426ba67a" (UID: "275b05f8-9361-4860-9b51-daa4426ba67a"). InnerVolumeSpecName "kube-api-access-pqrqc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.622492 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "275b05f8-9361-4860-9b51-daa4426ba67a" (UID: "275b05f8-9361-4860-9b51-daa4426ba67a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.666252 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/275b05f8-9361-4860-9b51-daa4426ba67a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.666312 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqrqc\" (UniqueName: \"kubernetes.io/projected/275b05f8-9361-4860-9b51-daa4426ba67a-kube-api-access-pqrqc\") on node \"crc\" DevicePath \"\""
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.850041 4926 generic.go:334] "Generic (PLEG): container finished" podID="275b05f8-9361-4860-9b51-daa4426ba67a" containerID="14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3" exitCode=0
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.850086 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerDied","Data":"14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3"}
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.850116 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fkz8w" event={"ID":"275b05f8-9361-4860-9b51-daa4426ba67a","Type":"ContainerDied","Data":"03e95ebc41a5b655943abb16d42184c37edd2e0cec9ef9d8606217e1511bc7cc"}
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.850114 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fkz8w"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.850150 4926 scope.go:117] "RemoveContainer" containerID="14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.887098 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fkz8w"]
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.894558 4926 scope.go:117] "RemoveContainer" containerID="3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.895740 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fkz8w"]
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.927766 4926 scope.go:117] "RemoveContainer" containerID="77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.984289 4926 scope.go:117] "RemoveContainer" containerID="14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3"
Nov 25 19:02:57 crc kubenswrapper[4926]: E1125 19:02:57.985220 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3\": container with ID starting with 14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3 not found: ID does not exist" containerID="14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.985266 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3"} err="failed to get container status \"14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3\": rpc error: code = NotFound desc = could not find container \"14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3\": container with ID starting with 14325216de445242cf75b44d366fc62dbec163609e118c55e86970eba6e5eaa3 not found: ID does not exist"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.985303 4926 scope.go:117] "RemoveContainer" containerID="3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c"
Nov 25 19:02:57 crc kubenswrapper[4926]: E1125 19:02:57.985820 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c\": container with ID starting with 3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c not found: ID does not exist" containerID="3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.985881 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c"} err="failed to get container status \"3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c\": rpc error: code = NotFound desc = could not find container \"3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c\": container with ID starting with 3a823ecb3568341367978569e0afa5959685bb2b7c04660a65edfea622b4ac0c not found: ID does not exist"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.985910 4926 scope.go:117] "RemoveContainer" containerID="77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c"
Nov 25 19:02:57 crc kubenswrapper[4926]: E1125 19:02:57.986366 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c\": container with ID starting with 77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c not found: ID does not exist" containerID="77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c"
Nov 25 19:02:57 crc kubenswrapper[4926]: I1125 19:02:57.986428 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c"} err="failed to get container status \"77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c\": rpc error: code = NotFound desc = could not find container \"77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c\": container with ID starting with 77778b50f121c0c72716e75a443dd72fe8899acc4b59dff9b4db8e4a77ee4e7c not found: ID does not exist"
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.292726 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bkrfz"]
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.292966 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bkrfz" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" containerID="cri-o://61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f" gracePeriod=2
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.358880 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" path="/var/lib/kubelet/pods/275b05f8-9361-4860-9b51-daa4426ba67a/volumes"
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.770008 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkrfz"
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.864052 4926 generic.go:334] "Generic (PLEG): container finished" podID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerID="61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f" exitCode=0
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.864157 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerDied","Data":"61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f"}
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.864451 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkrfz" event={"ID":"ddb380a1-7170-49d0-b587-c6abd8d73cbc","Type":"ContainerDied","Data":"6cdd9c95308e1fb8f0af81fbfb8129e92d6c43022740d3f7e84b418f3c4be442"}
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.864228 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkrfz"
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.864481 4926 scope.go:117] "RemoveContainer" containerID="61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f"
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.890858 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-utilities\") pod \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") "
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.891106 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jkvk\" (UniqueName: \"kubernetes.io/projected/ddb380a1-7170-49d0-b587-c6abd8d73cbc-kube-api-access-7jkvk\") pod \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") "
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.891161 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-catalog-content\") pod \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\" (UID: \"ddb380a1-7170-49d0-b587-c6abd8d73cbc\") "
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.891902 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-utilities" (OuterVolumeSpecName: "utilities") pod "ddb380a1-7170-49d0-b587-c6abd8d73cbc" (UID: "ddb380a1-7170-49d0-b587-c6abd8d73cbc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.892106 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.898826 4926 scope.go:117] "RemoveContainer" containerID="cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f"
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.909817 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddb380a1-7170-49d0-b587-c6abd8d73cbc-kube-api-access-7jkvk" (OuterVolumeSpecName: "kube-api-access-7jkvk") pod "ddb380a1-7170-49d0-b587-c6abd8d73cbc" (UID: "ddb380a1-7170-49d0-b587-c6abd8d73cbc"). InnerVolumeSpecName "kube-api-access-7jkvk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:02:58 crc kubenswrapper[4926]: I1125 19:02:58.995084 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jkvk\" (UniqueName: \"kubernetes.io/projected/ddb380a1-7170-49d0-b587-c6abd8d73cbc-kube-api-access-7jkvk\") on node \"crc\" DevicePath \"\""
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.023223 4926 scope.go:117] "RemoveContainer" containerID="63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.026732 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ddb380a1-7170-49d0-b587-c6abd8d73cbc" (UID: "ddb380a1-7170-49d0-b587-c6abd8d73cbc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.062166 4926 scope.go:117] "RemoveContainer" containerID="61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f"
Nov 25 19:02:59 crc kubenswrapper[4926]: E1125 19:02:59.062828 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f\": container with ID starting with 61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f not found: ID does not exist" containerID="61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.062894 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f"} err="failed to get container status \"61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f\": rpc error: code = NotFound desc = could not find container \"61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f\": container with ID starting with 61236154bd785f608f9052338a5d2c59d42888cc02aa27397c67501364f7160f not found: ID does not exist"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.062919 4926 scope.go:117] "RemoveContainer" containerID="cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f"
Nov 25 19:02:59 crc kubenswrapper[4926]: E1125 19:02:59.063170 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f\": container with ID starting with cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f not found: ID does not exist" containerID="cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.063220 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f"} err="failed to get container status \"cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f\": rpc error: code = NotFound desc = could not find container \"cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f\": container with ID starting with cb7dde829b9d449a426196c2030bd0fadc5f226652ff60d54bbf1ace840cea1f not found: ID does not exist"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.063234 4926 scope.go:117] "RemoveContainer" containerID="63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305"
Nov 25 19:02:59 crc kubenswrapper[4926]: E1125 19:02:59.063632 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305\": container with ID starting with 63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305 not found: ID does not exist" containerID="63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.063655 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305"} err="failed to get container status \"63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305\": rpc error: code = NotFound desc = could not find container \"63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305\": container with ID starting with 63978e1755665e6b43704fea58881c4b5976b1bb6df9cfa5947561cb897e1305 not found: ID does not exist"
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.096629 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb380a1-7170-49d0-b587-c6abd8d73cbc-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.198356 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bkrfz"]
Nov 25 19:02:59 crc kubenswrapper[4926]: I1125 19:02:59.235963 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bkrfz"]
Nov 25 19:03:00 crc kubenswrapper[4926]: I1125 19:03:00.342877 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" path="/var/lib/kubelet/pods/ddb380a1-7170-49d0-b587-c6abd8d73cbc/volumes"
Nov 25 19:03:04 crc kubenswrapper[4926]: I1125 19:03:04.329926 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:03:04 crc kubenswrapper[4926]: E1125 19:03:04.330707 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:03:18 crc kubenswrapper[4926]: I1125 19:03:18.329419 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:03:18 crc kubenswrapper[4926]: E1125 19:03:18.330199 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:03:31 crc kubenswrapper[4926]: I1125 19:03:31.362981 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 19:03:32 crc kubenswrapper[4926]: I1125 19:03:32.267788 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="thanos-sidecar" containerID="cri-o://57f73daf6657f6aabff98fc989249aba5a37203487312f893612b8697a828bc6" gracePeriod=600
Nov 25 19:03:32 crc kubenswrapper[4926]: I1125 19:03:32.267896 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="config-reloader" containerID="cri-o://c36a7da63701ddf686eb09fed950b8fbe57acfdfd9501f9309e0108b7c663f69" gracePeriod=600
Nov 25 19:03:32 crc kubenswrapper[4926]: I1125 19:03:32.268569 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="prometheus" containerID="cri-o://e90e2d4d25524df90364d719472780a27ef1a9cb1bc582b85a62fbb7f338d386" gracePeriod=600
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.285012 4926 generic.go:334] "Generic (PLEG): container finished" podID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerID="57f73daf6657f6aabff98fc989249aba5a37203487312f893612b8697a828bc6" exitCode=0
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.285470 4926 generic.go:334] "Generic (PLEG): container finished" podID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerID="c36a7da63701ddf686eb09fed950b8fbe57acfdfd9501f9309e0108b7c663f69" exitCode=0
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.285493 4926 generic.go:334] "Generic (PLEG): container finished" podID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerID="e90e2d4d25524df90364d719472780a27ef1a9cb1bc582b85a62fbb7f338d386" exitCode=0
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.285155 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerDied","Data":"57f73daf6657f6aabff98fc989249aba5a37203487312f893612b8697a828bc6"}
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.285557 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerDied","Data":"c36a7da63701ddf686eb09fed950b8fbe57acfdfd9501f9309e0108b7c663f69"}
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.285609 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerDied","Data":"e90e2d4d25524df90364d719472780a27ef1a9cb1bc582b85a62fbb7f338d386"}
Nov 25 19:03:33 crc kubenswrapper[4926]: I1125 19:03:33.329888 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:03:33 crc kubenswrapper[4926]: E1125 19:03:33.330437 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.103095 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.203090 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config-out\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.203424 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-tls-assets\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.203469 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ca984ebe-e3bd-4720-9718-4cb972ee65e5-prometheus-metric-storage-rulefiles-0\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.203540 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.203559 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-secret-combined-ca-bundle\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.203586 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.204230 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.204325 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-thanos-prometheus-http-client-file\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.204352 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48wrh\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-kube-api-access-48wrh\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") "
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.204396 4926
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.204446 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\" (UID: \"ca984ebe-e3bd-4720-9718-4cb972ee65e5\") " Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.205215 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca984ebe-e3bd-4720-9718-4cb972ee65e5-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.212832 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.213476 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.216146 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.219253 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.220624 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.220727 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config-out" (OuterVolumeSpecName: "config-out") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.222933 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-kube-api-access-48wrh" (OuterVolumeSpecName: "kube-api-access-48wrh") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "kube-api-access-48wrh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.223583 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config" (OuterVolumeSpecName: "config") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.261240 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.309737 4926 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310562 4926 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310645 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") on node \"crc\" " Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310716 4926 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310770 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48wrh\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-kube-api-access-48wrh\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310832 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310899 4926 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310960 4926 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ca984ebe-e3bd-4720-9718-4cb972ee65e5-config-out\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.311011 4926 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ca984ebe-e3bd-4720-9718-4cb972ee65e5-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.311062 4926 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ca984ebe-e3bd-4720-9718-4cb972ee65e5-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310356 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.310252 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ca984ebe-e3bd-4720-9718-4cb972ee65e5","Type":"ContainerDied","Data":"62fd802b0e4c2c79de2407504f585b3193c10611e6ff8e5a2b2b9c55c388275e"} Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.311538 4926 scope.go:117] "RemoveContainer" containerID="57f73daf6657f6aabff98fc989249aba5a37203487312f893612b8697a828bc6" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.312401 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config" (OuterVolumeSpecName: "web-config") pod "ca984ebe-e3bd-4720-9718-4cb972ee65e5" (UID: "ca984ebe-e3bd-4720-9718-4cb972ee65e5"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.356973 4926 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.357208 4926 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc") on node "crc" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.382669 4926 scope.go:117] "RemoveContainer" containerID="c36a7da63701ddf686eb09fed950b8fbe57acfdfd9501f9309e0108b7c663f69" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.407005 4926 scope.go:117] "RemoveContainer" containerID="e90e2d4d25524df90364d719472780a27ef1a9cb1bc582b85a62fbb7f338d386" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.412870 4926 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ca984ebe-e3bd-4720-9718-4cb972ee65e5-web-config\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.412909 4926 reconciler_common.go:293] "Volume detached for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") on node \"crc\" DevicePath \"\"" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.442895 4926 scope.go:117] "RemoveContainer" containerID="f360e063c64343b139d39e166b339300b1a923e9747d14997cdf3f93efb2506f" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.645815 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.682055 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.749900 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756245 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="extract-content" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756282 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="extract-content" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756316 4926 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="prometheus" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756325 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="prometheus" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756337 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="extract-utilities" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756345 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="extract-utilities" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756363 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756387 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756423 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="thanos-sidecar" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756431 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="thanos-sidecar" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756460 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="config-reloader" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756468 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="config-reloader" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756522 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="init-config-reloader" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756532 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="init-config-reloader" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756549 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="registry-server" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756556 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="registry-server" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756573 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="extract-content" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756583 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="extract-content" Nov 25 19:03:34 crc kubenswrapper[4926]: E1125 19:03:34.756612 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="extract-utilities" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.756619 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="extract-utilities" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.757200 
4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddb380a1-7170-49d0-b587-c6abd8d73cbc" containerName="registry-server" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.757243 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="275b05f8-9361-4860-9b51-daa4426ba67a" containerName="registry-server" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.757263 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="config-reloader" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.757298 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="thanos-sidecar" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.757322 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" containerName="prometheus" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.787645 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.794903 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.795102 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.797626 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.797905 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-9xpwm" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.798001 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.798679 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.824457 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.835562 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.835809 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.835909 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: 
\"kubernetes.io/configmap/301464c9-c4d4-4b22-8d83-1df733e32f25-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.835977 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836057 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836121 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/301464c9-c4d4-4b22-8d83-1df733e32f25-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836183 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfbsp\" (UniqueName: \"kubernetes.io/projected/301464c9-c4d4-4b22-8d83-1df733e32f25-kube-api-access-rfbsp\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836281 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/301464c9-c4d4-4b22-8d83-1df733e32f25-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836343 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836486 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-config\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.836572 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.937925 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/301464c9-c4d4-4b22-8d83-1df733e32f25-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.937965 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938028 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-config\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938057 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938082 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938811 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/301464c9-c4d4-4b22-8d83-1df733e32f25-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938951 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.938975 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/301464c9-c4d4-4b22-8d83-1df733e32f25-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.939019 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfbsp\" (UniqueName: \"kubernetes.io/projected/301464c9-c4d4-4b22-8d83-1df733e32f25-kube-api-access-rfbsp\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.942810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/301464c9-c4d4-4b22-8d83-1df733e32f25-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.943352 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.943546 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.945067 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/301464c9-c4d4-4b22-8d83-1df733e32f25-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.945899 4926 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.945994 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/967f74eafc5d0ef2758f2567e8e6584104bc92d0318f34ed949bfc88cba8d50f/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.946689 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.946929 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/301464c9-c4d4-4b22-8d83-1df733e32f25-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.947095 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.948716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-config\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.950477 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/301464c9-c4d4-4b22-8d83-1df733e32f25-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.960855 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfbsp\" (UniqueName: \"kubernetes.io/projected/301464c9-c4d4-4b22-8d83-1df733e32f25-kube-api-access-rfbsp\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:34 crc kubenswrapper[4926]: I1125 19:03:34.981284 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8cceee5c-3850-47bd-be0e-b248b31d07cc\") pod \"prometheus-metric-storage-0\" (UID: \"301464c9-c4d4-4b22-8d83-1df733e32f25\") " pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:35 crc kubenswrapper[4926]: I1125 19:03:35.123754 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 19:03:35 crc kubenswrapper[4926]: I1125 19:03:35.611804 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 19:03:36 crc kubenswrapper[4926]: I1125 19:03:36.341998 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca984ebe-e3bd-4720-9718-4cb972ee65e5" path="/var/lib/kubelet/pods/ca984ebe-e3bd-4720-9718-4cb972ee65e5/volumes" Nov 25 19:03:36 crc kubenswrapper[4926]: I1125 19:03:36.343276 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"301464c9-c4d4-4b22-8d83-1df733e32f25","Type":"ContainerStarted","Data":"1f7c6a2eac2e9b3a5d2c4452c093d61d2394f9109a9ca69ced3f2b5568de4b01"} Nov 25 19:03:41 crc kubenswrapper[4926]: I1125 19:03:41.440197 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"301464c9-c4d4-4b22-8d83-1df733e32f25","Type":"ContainerStarted","Data":"b65f9df3efc5ef18f2f0a5d49d480ad1506df39d04e392143132383e09dd0930"} Nov 25 19:03:48 crc kubenswrapper[4926]: I1125 19:03:48.329700 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:03:48 crc kubenswrapper[4926]: E1125 19:03:48.330667 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:03:51 crc kubenswrapper[4926]: I1125 19:03:51.560898 4926 generic.go:334] "Generic (PLEG): container finished" podID="301464c9-c4d4-4b22-8d83-1df733e32f25" containerID="b65f9df3efc5ef18f2f0a5d49d480ad1506df39d04e392143132383e09dd0930" exitCode=0 Nov 25 19:03:51 crc kubenswrapper[4926]: I1125 19:03:51.560975 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"301464c9-c4d4-4b22-8d83-1df733e32f25","Type":"ContainerDied","Data":"b65f9df3efc5ef18f2f0a5d49d480ad1506df39d04e392143132383e09dd0930"} Nov 25 19:03:52 crc kubenswrapper[4926]: I1125 19:03:52.578114 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"301464c9-c4d4-4b22-8d83-1df733e32f25","Type":"ContainerStarted","Data":"8fe0cb1b8a08902d09d8019565edf42b2a7619a6cb82c52a9c142e2ecfea496b"} Nov 25 19:03:56 crc kubenswrapper[4926]: I1125 19:03:56.618656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"301464c9-c4d4-4b22-8d83-1df733e32f25","Type":"ContainerStarted","Data":"db0e93d81fc6b5b085acf595360512d273b66ec4b00d9900f4f63dfe7b3fdef7"} Nov 25 19:03:56 crc kubenswrapper[4926]: I1125 19:03:56.619113 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"301464c9-c4d4-4b22-8d83-1df733e32f25","Type":"ContainerStarted","Data":"1765052cd26b3da824bfbf9fa2fdb16ef102e8f878dcf9bd0f6e33b024eb2df7"} Nov 25 19:03:56 crc kubenswrapper[4926]: I1125 19:03:56.660091 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=22.66007019 podStartE2EDuration="22.66007019s" 
podCreationTimestamp="2025-11-25 19:03:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:03:56.652018218 +0000 UTC m=+3067.037531853" watchObservedRunningTime="2025-11-25 19:03:56.66007019 +0000 UTC m=+3067.045583815" Nov 25 19:04:00 crc kubenswrapper[4926]: I1125 19:04:00.124903 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 19:04:01 crc kubenswrapper[4926]: I1125 19:04:01.329801 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:04:01 crc kubenswrapper[4926]: E1125 19:04:01.330602 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:04:05 crc kubenswrapper[4926]: I1125 19:04:05.124627 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 19:04:05 crc kubenswrapper[4926]: I1125 19:04:05.134676 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 19:04:05 crc kubenswrapper[4926]: I1125 19:04:05.734103 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 19:04:14 crc kubenswrapper[4926]: I1125 19:04:14.330052 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36" Nov 25 19:04:14 crc kubenswrapper[4926]: E1125 19:04:14.331150 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.900333 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.902501 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.904514 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.907183 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.907219 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-c6pk9" Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.907276 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 19:04:18 crc kubenswrapper[4926]: I1125 19:04:18.923905 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.060583 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/701f3ca1-77fd-4044-bdb3-e926855d035e-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.060633 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgchr\" (UniqueName: \"kubernetes.io/projected/701f3ca1-77fd-4044-bdb3-e926855d035e-kube-api-access-jgchr\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.060830 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.060891 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/701f3ca1-77fd-4044-bdb3-e926855d035e-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.060970 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.061046 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701f3ca1-77fd-4044-bdb3-e926855d035e-config-data\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.061066 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: 
\"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.061090 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.061140 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/701f3ca1-77fd-4044-bdb3-e926855d035e-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163508 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163556 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/701f3ca1-77fd-4044-bdb3-e926855d035e-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163584 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163617 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701f3ca1-77fd-4044-bdb3-e926855d035e-config-data\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163657 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163709 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/701f3ca1-77fd-4044-bdb3-e926855d035e-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" 
(UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163754 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/701f3ca1-77fd-4044-bdb3-e926855d035e-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.163777 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgchr\" (UniqueName: \"kubernetes.io/projected/701f3ca1-77fd-4044-bdb3-e926855d035e-kube-api-access-jgchr\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.164710 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.165084 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/701f3ca1-77fd-4044-bdb3-e926855d035e-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.165107 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/701f3ca1-77fd-4044-bdb3-e926855d035e-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.165915 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/701f3ca1-77fd-4044-bdb3-e926855d035e-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.166098 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701f3ca1-77fd-4044-bdb3-e926855d035e-config-data\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.170418 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.174204 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest" Nov 
25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.176322 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/701f3ca1-77fd-4044-bdb3-e926855d035e-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest"
Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.188617 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgchr\" (UniqueName: \"kubernetes.io/projected/701f3ca1-77fd-4044-bdb3-e926855d035e-kube-api-access-jgchr\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest"
Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.199141 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"701f3ca1-77fd-4044-bdb3-e926855d035e\") " pod="openstack/tempest-tests-tempest"
Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.255305 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.764820 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 25 19:04:19 crc kubenswrapper[4926]: I1125 19:04:19.874262 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"701f3ca1-77fd-4044-bdb3-e926855d035e","Type":"ContainerStarted","Data":"20bebcd2fa51064313607a944b7550d38b1bb54b5584d6870ae17081c48ea7bf"}
Nov 25 19:04:25 crc kubenswrapper[4926]: I1125 19:04:25.329068 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:04:25 crc kubenswrapper[4926]: E1125 19:04:25.332042 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:04:32 crc kubenswrapper[4926]: I1125 19:04:32.537563 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"701f3ca1-77fd-4044-bdb3-e926855d035e","Type":"ContainerStarted","Data":"b0d24c60ba94ce293c3e2ff42bab3f2f4a999fc7ae30a2c63fba906c67a8e40b"}
Nov 25 19:04:32 crc kubenswrapper[4926]: I1125 19:04:32.579015 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=5.266342052 podStartE2EDuration="15.578996698s" podCreationTimestamp="2025-11-25 19:04:17 +0000 UTC" firstStartedPulling="2025-11-25 19:04:19.779512833 +0000 UTC m=+3090.165026438" lastFinishedPulling="2025-11-25 19:04:30.092167479 +0000 UTC m=+3100.477681084" observedRunningTime="2025-11-25 19:04:32.57616332 +0000 UTC m=+3102.961676935" watchObservedRunningTime="2025-11-25 19:04:32.578996698 +0000 UTC m=+3102.964510313"
Nov 25 19:04:39 crc kubenswrapper[4926]: I1125 19:04:39.329758 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:04:39 crc kubenswrapper[4926]: E1125 19:04:39.330730 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:04:52 crc kubenswrapper[4926]: I1125 19:04:52.330326 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:04:52 crc kubenswrapper[4926]: E1125 19:04:52.331290 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:05:04 crc kubenswrapper[4926]: I1125 19:05:04.330090 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:05:04 crc kubenswrapper[4926]: E1125 19:05:04.330846 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:05:15 crc kubenswrapper[4926]: I1125 19:05:15.329061 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:05:15 crc kubenswrapper[4926]: E1125 19:05:15.331128 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:05:30 crc kubenswrapper[4926]: I1125 19:05:30.336308 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:05:30 crc kubenswrapper[4926]: E1125 19:05:30.337642 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:05:41 crc kubenswrapper[4926]: I1125 19:05:41.330618 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:05:41 crc kubenswrapper[4926]: E1125 19:05:41.331991 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:05:55 crc kubenswrapper[4926]: I1125 19:05:55.329722 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:05:55 crc kubenswrapper[4926]: E1125 19:05:55.331115 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:06:06 crc kubenswrapper[4926]: I1125 19:06:06.330180 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:06:06 crc kubenswrapper[4926]: E1125 19:06:06.331500 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:06:17 crc kubenswrapper[4926]: I1125 19:06:17.330480 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:06:17 crc kubenswrapper[4926]: E1125 19:06:17.331440 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:06:31 crc kubenswrapper[4926]: I1125 19:06:31.329528 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:06:31 crc kubenswrapper[4926]: E1125 19:06:31.330594 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:06:45 crc kubenswrapper[4926]: I1125 19:06:45.330124 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:06:46 crc kubenswrapper[4926]: I1125 19:06:46.159906 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"56502f4cdf88f62be162b5bc8dcd2f42381cf1b68b92d668e0f33554693a6fa5"}
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.423427 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4t4rh"]
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.428638 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.453369 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4t4rh"]
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.464833 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-catalog-content\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.464884 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-utilities\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.464969 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ccgm\" (UniqueName: \"kubernetes.io/projected/32c7bb59-751d-49e7-aba6-e2695d59997d-kube-api-access-2ccgm\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.566264 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ccgm\" (UniqueName: \"kubernetes.io/projected/32c7bb59-751d-49e7-aba6-e2695d59997d-kube-api-access-2ccgm\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.566958 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-catalog-content\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.567009 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-utilities\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.567645 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-catalog-content\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.567676 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-utilities\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.607096 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ccgm\" (UniqueName: \"kubernetes.io/projected/32c7bb59-751d-49e7-aba6-e2695d59997d-kube-api-access-2ccgm\") pod \"redhat-marketplace-4t4rh\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") " pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:08:59 crc kubenswrapper[4926]: I1125 19:08:59.757325 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:00 crc kubenswrapper[4926]: I1125 19:09:00.373753 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4t4rh"]
Nov 25 19:09:00 crc kubenswrapper[4926]: I1125 19:09:00.771559 4926 generic.go:334] "Generic (PLEG): container finished" podID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerID="9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa" exitCode=0
Nov 25 19:09:00 crc kubenswrapper[4926]: I1125 19:09:00.771806 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4t4rh" event={"ID":"32c7bb59-751d-49e7-aba6-e2695d59997d","Type":"ContainerDied","Data":"9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa"}
Nov 25 19:09:00 crc kubenswrapper[4926]: I1125 19:09:00.771832 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4t4rh" event={"ID":"32c7bb59-751d-49e7-aba6-e2695d59997d","Type":"ContainerStarted","Data":"5e5dc38c9dfcbdd357357bd5ac77ec2bcbb05e6d73b0ee2d96d8ad0efc8db9d0"}
Nov 25 19:09:00 crc kubenswrapper[4926]: I1125 19:09:00.773538 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 19:09:02 crc kubenswrapper[4926]: I1125 19:09:02.816710 4926 generic.go:334] "Generic (PLEG): container finished" podID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerID="e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd" exitCode=0
Nov 25 19:09:02 crc kubenswrapper[4926]: I1125 19:09:02.816965 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4t4rh" event={"ID":"32c7bb59-751d-49e7-aba6-e2695d59997d","Type":"ContainerDied","Data":"e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd"}
Nov 25 19:09:03 crc kubenswrapper[4926]: I1125 19:09:03.541080 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:09:03 crc kubenswrapper[4926]: I1125 19:09:03.541619 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:09:03 crc kubenswrapper[4926]: I1125 19:09:03.834059 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4t4rh" event={"ID":"32c7bb59-751d-49e7-aba6-e2695d59997d","Type":"ContainerStarted","Data":"d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc"}
Nov 25 19:09:03 crc kubenswrapper[4926]: I1125 19:09:03.859647 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4t4rh" podStartSLOduration=2.3389095109999998 podStartE2EDuration="4.859628168s" podCreationTimestamp="2025-11-25 19:08:59 +0000 UTC" firstStartedPulling="2025-11-25 19:09:00.773290749 +0000 UTC m=+3371.158804354" lastFinishedPulling="2025-11-25 19:09:03.294009366 +0000 UTC m=+3373.679523011" observedRunningTime="2025-11-25 19:09:03.859436863 +0000 UTC m=+3374.244950478" watchObservedRunningTime="2025-11-25 19:09:03.859628168 +0000 UTC m=+3374.245141783"
Nov 25 19:09:09 crc kubenswrapper[4926]: I1125 19:09:09.757926 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:09 crc kubenswrapper[4926]: I1125 19:09:09.758888 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:09 crc kubenswrapper[4926]: I1125 19:09:09.839524 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:09 crc kubenswrapper[4926]: I1125 19:09:09.972048 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:10 crc kubenswrapper[4926]: I1125 19:09:10.087238 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4t4rh"]
Nov 25 19:09:11 crc kubenswrapper[4926]: I1125 19:09:11.925214 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4t4rh" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="registry-server" containerID="cri-o://d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc" gracePeriod=2
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.426037 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.595970 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ccgm\" (UniqueName: \"kubernetes.io/projected/32c7bb59-751d-49e7-aba6-e2695d59997d-kube-api-access-2ccgm\") pod \"32c7bb59-751d-49e7-aba6-e2695d59997d\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") "
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.596025 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-utilities\") pod \"32c7bb59-751d-49e7-aba6-e2695d59997d\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") "
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.596270 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-catalog-content\") pod \"32c7bb59-751d-49e7-aba6-e2695d59997d\" (UID: \"32c7bb59-751d-49e7-aba6-e2695d59997d\") "
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.597023 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-utilities" (OuterVolumeSpecName: "utilities") pod "32c7bb59-751d-49e7-aba6-e2695d59997d" (UID: "32c7bb59-751d-49e7-aba6-e2695d59997d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.603727 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32c7bb59-751d-49e7-aba6-e2695d59997d-kube-api-access-2ccgm" (OuterVolumeSpecName: "kube-api-access-2ccgm") pod "32c7bb59-751d-49e7-aba6-e2695d59997d" (UID: "32c7bb59-751d-49e7-aba6-e2695d59997d"). InnerVolumeSpecName "kube-api-access-2ccgm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.615846 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32c7bb59-751d-49e7-aba6-e2695d59997d" (UID: "32c7bb59-751d-49e7-aba6-e2695d59997d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.698907 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ccgm\" (UniqueName: \"kubernetes.io/projected/32c7bb59-751d-49e7-aba6-e2695d59997d-kube-api-access-2ccgm\") on node \"crc\" DevicePath \"\""
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.698939 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.698949 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32c7bb59-751d-49e7-aba6-e2695d59997d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.936760 4926 generic.go:334] "Generic (PLEG): container finished" podID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerID="d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc" exitCode=0
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.936847 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4t4rh" event={"ID":"32c7bb59-751d-49e7-aba6-e2695d59997d","Type":"ContainerDied","Data":"d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc"}
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.936924 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4t4rh"
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.937117 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4t4rh" event={"ID":"32c7bb59-751d-49e7-aba6-e2695d59997d","Type":"ContainerDied","Data":"5e5dc38c9dfcbdd357357bd5ac77ec2bcbb05e6d73b0ee2d96d8ad0efc8db9d0"}
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.937153 4926 scope.go:117] "RemoveContainer" containerID="d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc"
Nov 25 19:09:12 crc kubenswrapper[4926]: I1125 19:09:12.963012 4926 scope.go:117] "RemoveContainer" containerID="e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.006647 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4t4rh"]
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.020979 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4t4rh"]
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.032115 4926 scope.go:117] "RemoveContainer" containerID="9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.053856 4926 scope.go:117] "RemoveContainer" containerID="d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc"
Nov 25 19:09:13 crc kubenswrapper[4926]: E1125 19:09:13.054226 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc\": container with ID starting with d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc not found: ID does not exist" containerID="d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.054262 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc"} err="failed to get container status \"d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc\": rpc error: code = NotFound desc = could not find container \"d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc\": container with ID starting with d039b14c71f32c0853d74b119993b105c6680789cf51e13b849c67f2fd3be5fc not found: ID does not exist"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.054290 4926 scope.go:117] "RemoveContainer" containerID="e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd"
Nov 25 19:09:13 crc kubenswrapper[4926]: E1125 19:09:13.054551 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd\": container with ID starting with e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd not found: ID does not exist" containerID="e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.054580 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd"} err="failed to get container status \"e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd\": rpc error: code = NotFound desc = could not find container \"e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd\": container with ID starting with e5ec445a8bf067e68517a5b2028e31a28e34113fd028984da6ce9a0990305dcd not found: ID does not exist"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.054602 4926 scope.go:117] "RemoveContainer" containerID="9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa"
Nov 25 19:09:13 crc kubenswrapper[4926]: E1125 19:09:13.054931 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa\": container with ID starting with 9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa not found: ID does not exist" containerID="9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa"
Nov 25 19:09:13 crc kubenswrapper[4926]: I1125 19:09:13.054960 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa"} err="failed to get container status \"9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa\": rpc error: code = NotFound desc = could not find container \"9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa\": container with ID starting with 9237f9e80d5abab09c7864ee95a1cac802ca4d9bbcb7e9af78cc02e2773f59fa not found: ID does not exist"
Nov 25 19:09:14 crc kubenswrapper[4926]: I1125 19:09:14.339468 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" path="/var/lib/kubelet/pods/32c7bb59-751d-49e7-aba6-e2695d59997d/volumes"
Nov 25 19:09:33 crc kubenswrapper[4926]: I1125 19:09:33.542277 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:09:33 crc kubenswrapper[4926]: I1125 19:09:33.543178 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:10:03 crc kubenswrapper[4926]: I1125 19:10:03.541286 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:10:03 crc kubenswrapper[4926]: I1125 19:10:03.541972 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:10:03 crc kubenswrapper[4926]: I1125 19:10:03.542026 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg"
Nov 25 19:10:03 crc kubenswrapper[4926]: I1125 19:10:03.542930 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"56502f4cdf88f62be162b5bc8dcd2f42381cf1b68b92d668e0f33554693a6fa5"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 19:10:03 crc kubenswrapper[4926]: I1125 19:10:03.542988 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://56502f4cdf88f62be162b5bc8dcd2f42381cf1b68b92d668e0f33554693a6fa5" gracePeriod=600
Nov 25 19:10:04 crc kubenswrapper[4926]: I1125 19:10:04.640982 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="56502f4cdf88f62be162b5bc8dcd2f42381cf1b68b92d668e0f33554693a6fa5" exitCode=0
Nov 25 19:10:04 crc kubenswrapper[4926]: I1125 19:10:04.641059 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"56502f4cdf88f62be162b5bc8dcd2f42381cf1b68b92d668e0f33554693a6fa5"}
Nov 25 19:10:04 crc kubenswrapper[4926]: I1125 19:10:04.643060 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1"}
Nov 25 19:10:04 crc kubenswrapper[4926]: I1125 19:10:04.643131 4926 scope.go:117] "RemoveContainer" containerID="9cffca3c787e830dc4590b444eebe78167839e520833075e1ce1cfc859036a36"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.448920 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 19:10:54 crc kubenswrapper[4926]: E1125 19:10:54.450435 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="extract-content"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.450458 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="extract-content"
Nov 25 19:10:54 crc kubenswrapper[4926]: E1125 19:10:54.450481 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="registry-server"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.450493 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="registry-server"
Nov 25 19:10:54 crc kubenswrapper[4926]: E1125 19:10:54.450545 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="extract-utilities"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.450559 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="extract-utilities"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.450988 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="32c7bb59-751d-49e7-aba6-e2695d59997d" containerName="registry-server"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.454491 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.467183 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.547183 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-catalog-content\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.547591 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-utilities\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.547839 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhzwn\" (UniqueName: \"kubernetes.io/projected/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-kube-api-access-jhzwn\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.649708 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-catalog-content\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.650201 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-utilities\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.650281 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhzwn\" (UniqueName: \"kubernetes.io/projected/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-kube-api-access-jhzwn\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.650442 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-catalog-content\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.650857 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-utilities\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.676554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhzwn\" (UniqueName: \"kubernetes.io/projected/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-kube-api-access-jhzwn\") pod \"community-operators-wjsww\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") " pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:54 crc kubenswrapper[4926]: I1125 19:10:54.779524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:10:55 crc kubenswrapper[4926]: I1125 19:10:55.348382 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 19:10:56 crc kubenswrapper[4926]: I1125 19:10:56.240418 4926 generic.go:334] "Generic (PLEG): container finished" podID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerID="3a5d5b5ddb38055cca8e3f3c8f6d5cd41fcc096758dbb9f0a1896ee142918c4a" exitCode=0
Nov 25 19:10:56 crc kubenswrapper[4926]: I1125 19:10:56.240497 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerDied","Data":"3a5d5b5ddb38055cca8e3f3c8f6d5cd41fcc096758dbb9f0a1896ee142918c4a"}
Nov 25 19:10:56 crc kubenswrapper[4926]: I1125 19:10:56.241618 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerStarted","Data":"647ce0d7ed7f0f6be2a0070b0412c8c9fffdcb811d314419debc2d2ab6dab1c9"}
Nov 25 19:11:00 crc kubenswrapper[4926]: I1125 19:11:00.283589 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerStarted","Data":"4817cd05b8c86d4b338f39b7314e82234eb9432ff067c2c214254e8592bcd445"}
Nov 25 19:11:01 crc kubenswrapper[4926]: I1125 19:11:01.304034 4926 generic.go:334] "Generic (PLEG): container finished" podID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerID="4817cd05b8c86d4b338f39b7314e82234eb9432ff067c2c214254e8592bcd445" exitCode=0
Nov 25 19:11:01 crc kubenswrapper[4926]: I1125 19:11:01.304146 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerDied","Data":"4817cd05b8c86d4b338f39b7314e82234eb9432ff067c2c214254e8592bcd445"}
Nov 25 19:11:02 crc kubenswrapper[4926]: I1125 19:11:02.320305 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerStarted","Data":"fba00f4dadb2d3eea5da78a6012aaa91948eb7947d95fd7e9d0052eb9945f24a"}
Nov 25 19:11:02 crc kubenswrapper[4926]: I1125 19:11:02.353617 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wjsww" podStartSLOduration=2.827850944 podStartE2EDuration="8.353591772s" podCreationTimestamp="2025-11-25 19:10:54 +0000 UTC" firstStartedPulling="2025-11-25 19:10:56.242596909 +0000 UTC m=+3486.628110514" lastFinishedPulling="2025-11-25 19:11:01.768337707 +0000 UTC m=+3492.153851342" observedRunningTime="2025-11-25 19:11:02.341963292 +0000 UTC m=+3492.727476937" watchObservedRunningTime="2025-11-25 19:11:02.353591772 +0000 UTC m=+3492.739105417"
Nov 25 19:11:04 crc kubenswrapper[4926]: I1125 19:11:04.780881 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:11:04 crc kubenswrapper[4926]: I1125 19:11:04.781449 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:11:04 crc kubenswrapper[4926]: I1125 19:11:04.846786 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:11:14 crc kubenswrapper[4926]: I1125 19:11:14.890048 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.015452 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.103392 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gxcdm"]
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.103670 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gxcdm" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="registry-server" containerID="cri-o://b966e160527e1120cc67cf52430e5910b04c0c6622546b2cc303693448d8bc3b" gracePeriod=2
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.467271 4926 generic.go:334] "Generic (PLEG): container finished" podID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerID="b966e160527e1120cc67cf52430e5910b04c0c6622546b2cc303693448d8bc3b" exitCode=0
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.467769 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerDied","Data":"b966e160527e1120cc67cf52430e5910b04c0c6622546b2cc303693448d8bc3b"}
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.592522 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.639394 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmdvs\" (UniqueName: \"kubernetes.io/projected/75a6defb-2e36-42a9-85b0-6913304b59a6-kube-api-access-lmdvs\") pod \"75a6defb-2e36-42a9-85b0-6913304b59a6\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") "
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.639494 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-catalog-content\") pod \"75a6defb-2e36-42a9-85b0-6913304b59a6\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") "
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.639542 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-utilities\") pod \"75a6defb-2e36-42a9-85b0-6913304b59a6\" (UID: \"75a6defb-2e36-42a9-85b0-6913304b59a6\") "
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.642800 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-utilities" (OuterVolumeSpecName: "utilities") pod "75a6defb-2e36-42a9-85b0-6913304b59a6" (UID: "75a6defb-2e36-42a9-85b0-6913304b59a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.656083 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75a6defb-2e36-42a9-85b0-6913304b59a6-kube-api-access-lmdvs" (OuterVolumeSpecName: "kube-api-access-lmdvs") pod "75a6defb-2e36-42a9-85b0-6913304b59a6" (UID: "75a6defb-2e36-42a9-85b0-6913304b59a6"). InnerVolumeSpecName "kube-api-access-lmdvs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.732150 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "75a6defb-2e36-42a9-85b0-6913304b59a6" (UID: "75a6defb-2e36-42a9-85b0-6913304b59a6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.741945 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmdvs\" (UniqueName: \"kubernetes.io/projected/75a6defb-2e36-42a9-85b0-6913304b59a6-kube-api-access-lmdvs\") on node \"crc\" DevicePath \"\""
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.741987 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 19:11:15 crc kubenswrapper[4926]: I1125 19:11:15.741999 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75a6defb-2e36-42a9-85b0-6913304b59a6-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.489324 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gxcdm" event={"ID":"75a6defb-2e36-42a9-85b0-6913304b59a6","Type":"ContainerDied","Data":"5d53954aa1e30c9e7134a042a8542a65d7eb5dd8fbedd3383855629e14f6cdb0"}
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.489385 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gxcdm"
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.489393 4926 scope.go:117] "RemoveContainer" containerID="b966e160527e1120cc67cf52430e5910b04c0c6622546b2cc303693448d8bc3b"
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.512181 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gxcdm"]
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.519429 4926 scope.go:117] "RemoveContainer" containerID="6c6e11ec136a8fbbacb3561466a9c6b9ef565b10b3833a821ac12cd9b8313b22"
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.525391 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gxcdm"]
Nov 25 19:11:16 crc kubenswrapper[4926]: I1125 19:11:16.542337 4926 scope.go:117] "RemoveContainer" containerID="4749c79461bc0cae1eef58404ad0abbbe08652c192b3a1e093f3527f6ebae264"
Nov 25 19:11:18 crc kubenswrapper[4926]: I1125 19:11:18.344424 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" path="/var/lib/kubelet/pods/75a6defb-2e36-42a9-85b0-6913304b59a6/volumes"
Nov 25 19:12:03 crc kubenswrapper[4926]: I1125 19:12:03.541942 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:12:03 crc kubenswrapper[4926]: I1125 19:12:03.542639 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.766610 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tc4hr"]
Nov 25 19:12:17 crc kubenswrapper[4926]: E1125 19:12:17.767728 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="extract-content"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.767744 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="extract-content"
Nov 25 19:12:17 crc kubenswrapper[4926]: E1125 19:12:17.767792 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="registry-server"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.767798 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="registry-server"
Nov 25 19:12:17 crc kubenswrapper[4926]: E1125 19:12:17.767807 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="extract-utilities"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.767815 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="extract-utilities"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.768033 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="75a6defb-2e36-42a9-85b0-6913304b59a6" containerName="registry-server"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.770085 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.783596 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tc4hr"]
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.945080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-catalog-content\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.945180 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-utilities\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:17 crc kubenswrapper[4926]: I1125 19:12:17.945208 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt27r\" (UniqueName: \"kubernetes.io/projected/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-kube-api-access-gt27r\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.047743 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-catalog-content\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.047958 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-utilities\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.048010 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt27r\" (UniqueName: \"kubernetes.io/projected/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-kube-api-access-gt27r\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.048589 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-catalog-content\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.048671 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-utilities\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.074501 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt27r\" (UniqueName: \"kubernetes.io/projected/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-kube-api-access-gt27r\") pod \"redhat-operators-tc4hr\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") " pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.094845 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:18 crc kubenswrapper[4926]: I1125 19:12:18.634073 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tc4hr"]
Nov 25 19:12:19 crc kubenswrapper[4926]: I1125 19:12:19.222608 4926 generic.go:334] "Generic (PLEG): container finished" podID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerID="834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212" exitCode=0
Nov 25 19:12:19 crc kubenswrapper[4926]: I1125 19:12:19.222715 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerDied","Data":"834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212"}
Nov 25 19:12:19 crc kubenswrapper[4926]: I1125 19:12:19.222981 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerStarted","Data":"e6655fb3ce8beabd9b779bf5569185d4d0083021df6ff445b69d6b776936bb8b"}
Nov 25 19:12:20 crc kubenswrapper[4926]: I1125 19:12:20.237420 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerStarted","Data":"03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08"}
Nov 25 19:12:24 crc kubenswrapper[4926]: I1125 19:12:24.283874 4926 generic.go:334] "Generic (PLEG): container finished" podID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerID="03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08" exitCode=0
Nov 25 19:12:24 crc kubenswrapper[4926]: I1125 19:12:24.284259 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerDied","Data":"03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08"}
Nov 25 19:12:25 crc kubenswrapper[4926]: I1125 19:12:25.294393 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerStarted","Data":"e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae"}
Nov 25 19:12:25 crc kubenswrapper[4926]: I1125 19:12:25.315665 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tc4hr" podStartSLOduration=2.823661612 podStartE2EDuration="8.315646804s" podCreationTimestamp="2025-11-25 19:12:17 +0000 UTC" firstStartedPulling="2025-11-25 19:12:19.224045789 +0000 UTC m=+3569.609559394" lastFinishedPulling="2025-11-25 19:12:24.716030981 +0000 UTC m=+3575.101544586" observedRunningTime="2025-11-25 19:12:25.313389832 +0000 UTC m=+3575.698903457" watchObservedRunningTime="2025-11-25 19:12:25.315646804 +0000 UTC m=+3575.701160409"
Nov 25 19:12:28 crc kubenswrapper[4926]: I1125 19:12:28.095279 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:28 crc kubenswrapper[4926]: I1125 19:12:28.095656 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:29 crc kubenswrapper[4926]: I1125 19:12:29.159222 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tc4hr" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="registry-server" probeResult="failure" output=<
Nov 25 19:12:29 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 25 19:12:29 crc kubenswrapper[4926]: >
Nov 25 19:12:33 crc kubenswrapper[4926]: I1125 19:12:33.541828 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:12:33 crc kubenswrapper[4926]: I1125 19:12:33.544622 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:12:38 crc kubenswrapper[4926]: I1125 19:12:38.189501 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:38 crc kubenswrapper[4926]: I1125 19:12:38.263755 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:38 crc kubenswrapper[4926]: I1125 19:12:38.439928 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tc4hr"]
Nov 25 19:12:39 crc kubenswrapper[4926]: I1125 19:12:39.484025 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tc4hr" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="registry-server" containerID="cri-o://e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae" gracePeriod=2
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.031556 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.135158 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt27r\" (UniqueName: \"kubernetes.io/projected/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-kube-api-access-gt27r\") pod \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") "
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.135225 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-catalog-content\") pod \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") "
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.135264 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-utilities\") pod \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\" (UID: \"403e16a3-ceaa-420c-8718-7a88fb0b1fa1\") "
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.136963 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-utilities" (OuterVolumeSpecName: "utilities") pod "403e16a3-ceaa-420c-8718-7a88fb0b1fa1" (UID: "403e16a3-ceaa-420c-8718-7a88fb0b1fa1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.146624 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-kube-api-access-gt27r" (OuterVolumeSpecName: "kube-api-access-gt27r") pod "403e16a3-ceaa-420c-8718-7a88fb0b1fa1" (UID: "403e16a3-ceaa-420c-8718-7a88fb0b1fa1"). InnerVolumeSpecName "kube-api-access-gt27r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.238717 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt27r\" (UniqueName: \"kubernetes.io/projected/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-kube-api-access-gt27r\") on node \"crc\" DevicePath \"\""
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.238752 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.286261 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "403e16a3-ceaa-420c-8718-7a88fb0b1fa1" (UID: "403e16a3-ceaa-420c-8718-7a88fb0b1fa1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.340005 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403e16a3-ceaa-420c-8718-7a88fb0b1fa1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.501474 4926 generic.go:334] "Generic (PLEG): container finished" podID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerID="e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae" exitCode=0
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.501867 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerDied","Data":"e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae"}
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.501903 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tc4hr" event={"ID":"403e16a3-ceaa-420c-8718-7a88fb0b1fa1","Type":"ContainerDied","Data":"e6655fb3ce8beabd9b779bf5569185d4d0083021df6ff445b69d6b776936bb8b"}
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.501943 4926 scope.go:117] "RemoveContainer" containerID="e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.502172 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tc4hr"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.546924 4926 scope.go:117] "RemoveContainer" containerID="03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.549143 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tc4hr"]
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.562004 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tc4hr"]
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.586122 4926 scope.go:117] "RemoveContainer" containerID="834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.638288 4926 scope.go:117] "RemoveContainer" containerID="e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae"
Nov 25 19:12:40 crc kubenswrapper[4926]: E1125 19:12:40.638922 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae\": container with ID starting with e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae not found: ID does not exist" containerID="e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.638991 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae"} err="failed to get container status \"e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae\": rpc error: code = NotFound desc = could not find container \"e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae\": container with ID starting with e8580d59dbc0bba90de83ab3e2be3c861188dab4fe341160cdf6bd5f080e8eae not found: ID does not exist"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.639040 4926 scope.go:117] "RemoveContainer" containerID="03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08"
Nov 25 19:12:40 crc kubenswrapper[4926]: E1125 19:12:40.639478 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08\": container with ID starting with 03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08 not found: ID does not exist" containerID="03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.639543 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08"} err="failed to get container status \"03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08\": rpc error: code = NotFound desc = could not find container \"03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08\": container with ID starting with 03e61de2af44f979fcc8581cf2f903a44697c5000769954dd107b0facbc02e08 not found: ID does not exist"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.639576 4926 scope.go:117] "RemoveContainer" containerID="834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212"
Nov 25 19:12:40 crc kubenswrapper[4926]: E1125 19:12:40.639899 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212\": container with ID starting with 834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212 not found: ID does not exist" containerID="834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212"
Nov 25 19:12:40 crc kubenswrapper[4926]: I1125 19:12:40.639929 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212"} err="failed to get container status \"834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212\": rpc error: code = NotFound desc = could not find container \"834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212\": container with ID starting with 834aa02da43668661595c1db9b5973ace5853655b3d7b7477e61fd7147eca212 not found: ID does not exist"
Nov 25 19:12:42 crc kubenswrapper[4926]: I1125 19:12:42.353070 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" path="/var/lib/kubelet/pods/403e16a3-ceaa-420c-8718-7a88fb0b1fa1/volumes"
Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.541764 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.542416 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.542471 4926 kubelet.go:2542] "SyncLoop (probe)"
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.543582 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.543655 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" gracePeriod=600 Nov 25 19:13:03 crc kubenswrapper[4926]: E1125 19:13:03.671314 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.864515 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" exitCode=0 Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.864567 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1"} Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.864611 4926 scope.go:117] "RemoveContainer" containerID="56502f4cdf88f62be162b5bc8dcd2f42381cf1b68b92d668e0f33554693a6fa5" Nov 25 19:13:03 crc kubenswrapper[4926]: I1125 19:13:03.865508 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:13:03 crc kubenswrapper[4926]: E1125 19:13:03.866031 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:13:16 crc kubenswrapper[4926]: I1125 19:13:16.330321 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:13:16 crc kubenswrapper[4926]: E1125 19:13:16.331012 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:13:28 
crc kubenswrapper[4926]: I1125 19:13:28.329566 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:13:28 crc kubenswrapper[4926]: E1125 19:13:28.330844 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:13:43 crc kubenswrapper[4926]: I1125 19:13:43.329116 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:13:43 crc kubenswrapper[4926]: E1125 19:13:43.329978 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.160865 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dt8rc"] Nov 25 19:13:52 crc kubenswrapper[4926]: E1125 19:13:52.162107 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="extract-content" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.162130 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="extract-content" Nov 25 19:13:52 crc kubenswrapper[4926]: E1125 19:13:52.162154 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="registry-server" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.162165 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="registry-server" Nov 25 19:13:52 crc kubenswrapper[4926]: E1125 19:13:52.162183 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="extract-utilities" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.162195 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="extract-utilities" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.162659 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="403e16a3-ceaa-420c-8718-7a88fb0b1fa1" containerName="registry-server" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.165005 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.216134 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dt8rc"] Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.306724 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf478\" (UniqueName: \"kubernetes.io/projected/c0aec656-4181-4aae-a1f4-56344ab869c2-kube-api-access-rf478\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.306813 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-catalog-content\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.307146 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-utilities\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.409633 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-utilities\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.410059 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf478\" (UniqueName: \"kubernetes.io/projected/c0aec656-4181-4aae-a1f4-56344ab869c2-kube-api-access-rf478\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.410121 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-catalog-content\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.410347 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-utilities\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.410596 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-catalog-content\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.440109 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rf478\" (UniqueName: \"kubernetes.io/projected/c0aec656-4181-4aae-a1f4-56344ab869c2-kube-api-access-rf478\") pod \"certified-operators-dt8rc\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.520653 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:13:52 crc kubenswrapper[4926]: I1125 19:13:52.848083 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dt8rc"] Nov 25 19:13:53 crc kubenswrapper[4926]: I1125 19:13:53.496068 4926 generic.go:334] "Generic (PLEG): container finished" podID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerID="65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd" exitCode=0 Nov 25 19:13:53 crc kubenswrapper[4926]: I1125 19:13:53.496167 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerDied","Data":"65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd"} Nov 25 19:13:53 crc kubenswrapper[4926]: I1125 19:13:53.496600 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerStarted","Data":"1dc5950c310ce4a0eb8a54e04c0030b292b6658db4f2d3a85a5cc477109b5a6b"} Nov 25 19:13:55 crc kubenswrapper[4926]: I1125 19:13:55.521695 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerStarted","Data":"c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee"} Nov 25 19:13:56 crc kubenswrapper[4926]: I1125 19:13:56.330100 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:13:56 crc kubenswrapper[4926]: E1125 19:13:56.330839 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:13:56 crc kubenswrapper[4926]: I1125 19:13:56.539996 4926 generic.go:334] "Generic (PLEG): container finished" podID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerID="c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee" exitCode=0 Nov 25 19:13:56 crc kubenswrapper[4926]: I1125 19:13:56.540493 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerDied","Data":"c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee"} Nov 25 19:13:57 crc kubenswrapper[4926]: I1125 19:13:57.567726 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerStarted","Data":"b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce"} Nov 25 19:13:57 crc kubenswrapper[4926]: I1125 19:13:57.601972 4926 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dt8rc" podStartSLOduration=2.14982603 podStartE2EDuration="5.601946464s" podCreationTimestamp="2025-11-25 19:13:52 +0000 UTC" firstStartedPulling="2025-11-25 19:13:53.500987867 +0000 UTC m=+3663.886501512" lastFinishedPulling="2025-11-25 19:13:56.953108301 +0000 UTC m=+3667.338621946" observedRunningTime="2025-11-25 19:13:57.589822342 +0000 UTC m=+3667.975335997" watchObservedRunningTime="2025-11-25 19:13:57.601946464 +0000 UTC m=+3667.987460079" Nov 25 19:14:02 crc kubenswrapper[4926]: I1125 19:14:02.521722 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:14:02 crc kubenswrapper[4926]: I1125 19:14:02.522770 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:14:02 crc kubenswrapper[4926]: I1125 19:14:02.622866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:14:02 crc kubenswrapper[4926]: I1125 19:14:02.721086 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:14:02 crc kubenswrapper[4926]: I1125 19:14:02.877243 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dt8rc"] Nov 25 19:14:04 crc kubenswrapper[4926]: I1125 19:14:04.667492 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dt8rc" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="registry-server" containerID="cri-o://b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce" gracePeriod=2 Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.187188 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.331021 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rf478\" (UniqueName: \"kubernetes.io/projected/c0aec656-4181-4aae-a1f4-56344ab869c2-kube-api-access-rf478\") pod \"c0aec656-4181-4aae-a1f4-56344ab869c2\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.331152 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-utilities\") pod \"c0aec656-4181-4aae-a1f4-56344ab869c2\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.331363 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-catalog-content\") pod \"c0aec656-4181-4aae-a1f4-56344ab869c2\" (UID: \"c0aec656-4181-4aae-a1f4-56344ab869c2\") " Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.332594 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-utilities" (OuterVolumeSpecName: "utilities") pod "c0aec656-4181-4aae-a1f4-56344ab869c2" (UID: "c0aec656-4181-4aae-a1f4-56344ab869c2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.343581 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0aec656-4181-4aae-a1f4-56344ab869c2-kube-api-access-rf478" (OuterVolumeSpecName: "kube-api-access-rf478") pod "c0aec656-4181-4aae-a1f4-56344ab869c2" (UID: "c0aec656-4181-4aae-a1f4-56344ab869c2"). InnerVolumeSpecName "kube-api-access-rf478". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.436013 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rf478\" (UniqueName: \"kubernetes.io/projected/c0aec656-4181-4aae-a1f4-56344ab869c2-kube-api-access-rf478\") on node \"crc\" DevicePath \"\"" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.436340 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.637987 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0aec656-4181-4aae-a1f4-56344ab869c2" (UID: "c0aec656-4181-4aae-a1f4-56344ab869c2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.640684 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0aec656-4181-4aae-a1f4-56344ab869c2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.684697 4926 generic.go:334] "Generic (PLEG): container finished" podID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerID="b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce" exitCode=0 Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.684767 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerDied","Data":"b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce"} Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.684824 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dt8rc" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.684833 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dt8rc" event={"ID":"c0aec656-4181-4aae-a1f4-56344ab869c2","Type":"ContainerDied","Data":"1dc5950c310ce4a0eb8a54e04c0030b292b6658db4f2d3a85a5cc477109b5a6b"} Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.684924 4926 scope.go:117] "RemoveContainer" containerID="b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.711203 4926 scope.go:117] "RemoveContainer" containerID="c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.736137 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dt8rc"] Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.745550 4926 scope.go:117] "RemoveContainer" containerID="65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.751636 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dt8rc"] Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.797982 4926 scope.go:117] "RemoveContainer" containerID="b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce" Nov 25 19:14:05 crc kubenswrapper[4926]: E1125 19:14:05.798478 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce\": container with ID starting with b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce not found: ID does not exist" containerID="b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.798528 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce"} err="failed to get container status \"b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce\": rpc error: code = NotFound desc = could not find container \"b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce\": container with ID starting with b60ad7b825cb6ffe667aab03fdd5cd0ffc538efa552f956c06c6468244c3c0ce not found: ID does not exist" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.798563 4926 scope.go:117] "RemoveContainer" containerID="c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee" Nov 25 19:14:05 crc kubenswrapper[4926]: E1125 19:14:05.799038 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee\": container with ID starting with c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee not found: ID does not exist" containerID="c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.799077 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee"} err="failed to get container status \"c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee\": rpc error: code = NotFound desc = could not find 
container \"c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee\": container with ID starting with c6ac8fbb2601bf1fecd7f6018f810e57a6d54c86bf6d96fd58d9ea507ef70dee not found: ID does not exist" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.799107 4926 scope.go:117] "RemoveContainer" containerID="65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd" Nov 25 19:14:05 crc kubenswrapper[4926]: E1125 19:14:05.799431 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd\": container with ID starting with 65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd not found: ID does not exist" containerID="65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd" Nov 25 19:14:05 crc kubenswrapper[4926]: I1125 19:14:05.799461 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd"} err="failed to get container status \"65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd\": rpc error: code = NotFound desc = could not find container \"65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd\": container with ID starting with 65f799121f743ec99c4eb801860dea67f170fd302b716c47b9bd37f91c58b5cd not found: ID does not exist" Nov 25 19:14:06 crc kubenswrapper[4926]: I1125 19:14:06.346959 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" path="/var/lib/kubelet/pods/c0aec656-4181-4aae-a1f4-56344ab869c2/volumes" Nov 25 19:14:08 crc kubenswrapper[4926]: I1125 19:14:08.328921 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:14:08 crc kubenswrapper[4926]: E1125 19:14:08.329504 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:14:22 crc kubenswrapper[4926]: I1125 19:14:22.329418 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:14:22 crc kubenswrapper[4926]: E1125 19:14:22.330341 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:14:37 crc kubenswrapper[4926]: I1125 19:14:37.329882 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:14:37 crc kubenswrapper[4926]: E1125 19:14:37.330810 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:14:49 crc kubenswrapper[4926]: I1125 19:14:49.329480 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:14:49 crc kubenswrapper[4926]: E1125 19:14:49.330282 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.202350 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk"] Nov 25 19:15:00 crc kubenswrapper[4926]: E1125 19:15:00.203323 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="extract-content" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.203339 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="extract-content" Nov 25 19:15:00 crc kubenswrapper[4926]: E1125 19:15:00.203400 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="registry-server" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.203409 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="registry-server" Nov 25 19:15:00 crc kubenswrapper[4926]: E1125 19:15:00.203429 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="extract-utilities" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.203438 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="extract-utilities" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.203687 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0aec656-4181-4aae-a1f4-56344ab869c2" containerName="registry-server" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.204488 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.206873 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.208598 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.235113 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk"] Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.275286 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64kxt\" (UniqueName: \"kubernetes.io/projected/46b88141-c093-48d6-9e52-6cd42570c8dc-kube-api-access-64kxt\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.275470 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46b88141-c093-48d6-9e52-6cd42570c8dc-secret-volume\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.275491 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b88141-c093-48d6-9e52-6cd42570c8dc-config-volume\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.376538 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46b88141-c093-48d6-9e52-6cd42570c8dc-secret-volume\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.376582 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b88141-c093-48d6-9e52-6cd42570c8dc-config-volume\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.376654 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64kxt\" (UniqueName: \"kubernetes.io/projected/46b88141-c093-48d6-9e52-6cd42570c8dc-kube-api-access-64kxt\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.377771 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b88141-c093-48d6-9e52-6cd42570c8dc-config-volume\") pod 
\"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.395489 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64kxt\" (UniqueName: \"kubernetes.io/projected/46b88141-c093-48d6-9e52-6cd42570c8dc-kube-api-access-64kxt\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.395575 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46b88141-c093-48d6-9e52-6cd42570c8dc-secret-volume\") pod \"collect-profiles-29401635-9ldmk\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:00 crc kubenswrapper[4926]: I1125 19:15:00.540741 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:01 crc kubenswrapper[4926]: I1125 19:15:01.000247 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk"] Nov 25 19:15:01 crc kubenswrapper[4926]: I1125 19:15:01.332872 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" event={"ID":"46b88141-c093-48d6-9e52-6cd42570c8dc","Type":"ContainerStarted","Data":"4503e6ea8158be6fee693deb1669f39a466f9086706f77d7b19053e6f7c0dacd"} Nov 25 19:15:01 crc kubenswrapper[4926]: I1125 19:15:01.333268 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" event={"ID":"46b88141-c093-48d6-9e52-6cd42570c8dc","Type":"ContainerStarted","Data":"3e4180cb0af218fc37ebcca428a20a4f27270b399a7523994b89f62e4c0b30ce"} Nov 25 19:15:01 crc kubenswrapper[4926]: I1125 19:15:01.360630 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" podStartSLOduration=1.360591717 podStartE2EDuration="1.360591717s" podCreationTimestamp="2025-11-25 19:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:15:01.356552266 +0000 UTC m=+3731.742065871" watchObservedRunningTime="2025-11-25 19:15:01.360591717 +0000 UTC m=+3731.746105362" Nov 25 19:15:02 crc kubenswrapper[4926]: I1125 19:15:02.343242 4926 generic.go:334] "Generic (PLEG): container finished" podID="46b88141-c093-48d6-9e52-6cd42570c8dc" containerID="4503e6ea8158be6fee693deb1669f39a466f9086706f77d7b19053e6f7c0dacd" exitCode=0 Nov 25 19:15:02 crc kubenswrapper[4926]: I1125 19:15:02.343296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" event={"ID":"46b88141-c093-48d6-9e52-6cd42570c8dc","Type":"ContainerDied","Data":"4503e6ea8158be6fee693deb1669f39a466f9086706f77d7b19053e6f7c0dacd"} Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.879793 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.965802 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46b88141-c093-48d6-9e52-6cd42570c8dc-secret-volume\") pod \"46b88141-c093-48d6-9e52-6cd42570c8dc\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.966927 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b88141-c093-48d6-9e52-6cd42570c8dc-config-volume\") pod \"46b88141-c093-48d6-9e52-6cd42570c8dc\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.967041 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64kxt\" (UniqueName: \"kubernetes.io/projected/46b88141-c093-48d6-9e52-6cd42570c8dc-kube-api-access-64kxt\") pod \"46b88141-c093-48d6-9e52-6cd42570c8dc\" (UID: \"46b88141-c093-48d6-9e52-6cd42570c8dc\") " Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.968587 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b88141-c093-48d6-9e52-6cd42570c8dc-config-volume" (OuterVolumeSpecName: "config-volume") pod "46b88141-c093-48d6-9e52-6cd42570c8dc" (UID: "46b88141-c093-48d6-9e52-6cd42570c8dc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.981568 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46b88141-c093-48d6-9e52-6cd42570c8dc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "46b88141-c093-48d6-9e52-6cd42570c8dc" (UID: "46b88141-c093-48d6-9e52-6cd42570c8dc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:15:03 crc kubenswrapper[4926]: I1125 19:15:03.981687 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b88141-c093-48d6-9e52-6cd42570c8dc-kube-api-access-64kxt" (OuterVolumeSpecName: "kube-api-access-64kxt") pod "46b88141-c093-48d6-9e52-6cd42570c8dc" (UID: "46b88141-c093-48d6-9e52-6cd42570c8dc"). InnerVolumeSpecName "kube-api-access-64kxt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.069868 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46b88141-c093-48d6-9e52-6cd42570c8dc-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.069913 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b88141-c093-48d6-9e52-6cd42570c8dc-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.069927 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64kxt\" (UniqueName: \"kubernetes.io/projected/46b88141-c093-48d6-9e52-6cd42570c8dc-kube-api-access-64kxt\") on node \"crc\" DevicePath \"\"" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.329710 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:15:04 crc kubenswrapper[4926]: E1125 19:15:04.330062 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.368502 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" event={"ID":"46b88141-c093-48d6-9e52-6cd42570c8dc","Type":"ContainerDied","Data":"3e4180cb0af218fc37ebcca428a20a4f27270b399a7523994b89f62e4c0b30ce"} Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.368542 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e4180cb0af218fc37ebcca428a20a4f27270b399a7523994b89f62e4c0b30ce" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.368639 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk" Nov 25 19:15:04 crc kubenswrapper[4926]: I1125 19:15:04.993943 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"] Nov 25 19:15:05 crc kubenswrapper[4926]: I1125 19:15:05.014560 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401590-jbjjc"] Nov 25 19:15:06 crc kubenswrapper[4926]: I1125 19:15:06.351330 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="136a79fc-cb97-4d69-8e29-8907348a37ce" path="/var/lib/kubelet/pods/136a79fc-cb97-4d69-8e29-8907348a37ce/volumes" Nov 25 19:15:18 crc kubenswrapper[4926]: I1125 19:15:18.330469 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:15:18 crc kubenswrapper[4926]: E1125 19:15:18.331418 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:15:23 crc kubenswrapper[4926]: I1125 19:15:23.515339 4926 scope.go:117] "RemoveContainer" containerID="2a3f6ecd7a0fa20487992bf3ef3c6e3ef316c4a3dea291e98a577dcd7982d458" Nov 25 19:15:33 crc kubenswrapper[4926]: I1125 19:15:33.329944 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:15:33 crc kubenswrapper[4926]: E1125 19:15:33.330962 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:15:44 crc kubenswrapper[4926]: I1125 19:15:44.329849 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:15:44 crc kubenswrapper[4926]: E1125 19:15:44.331155 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:15:55 crc kubenswrapper[4926]: I1125 19:15:55.329676 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:15:55 crc kubenswrapper[4926]: E1125 19:15:55.330508 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:16:06 crc kubenswrapper[4926]: I1125 19:16:06.330009 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:16:06 crc kubenswrapper[4926]: E1125 19:16:06.331502 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:16:21 crc kubenswrapper[4926]: I1125 19:16:21.329168 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:16:21 crc kubenswrapper[4926]: E1125 19:16:21.331168 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:16:33 crc kubenswrapper[4926]: I1125 19:16:33.329709 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:16:33 crc kubenswrapper[4926]: E1125 19:16:33.332724 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:16:45 crc kubenswrapper[4926]: I1125 19:16:45.329365 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:16:45 crc kubenswrapper[4926]: E1125 19:16:45.330403 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:17:00 crc kubenswrapper[4926]: I1125 19:17:00.348775 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:17:00 crc kubenswrapper[4926]: E1125 19:17:00.352842 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:17:13 crc kubenswrapper[4926]: I1125 19:17:13.329235 4926 
scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:17:13 crc kubenswrapper[4926]: E1125 19:17:13.330113 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:17:26 crc kubenswrapper[4926]: I1125 19:17:26.330401 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:17:26 crc kubenswrapper[4926]: E1125 19:17:26.331286 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:17:41 crc kubenswrapper[4926]: I1125 19:17:41.332457 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:17:41 crc kubenswrapper[4926]: E1125 19:17:41.334188 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:17:55 crc kubenswrapper[4926]: I1125 19:17:55.329180 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:17:55 crc kubenswrapper[4926]: E1125 19:17:55.330139 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:18:07 crc kubenswrapper[4926]: I1125 19:18:07.329988 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:18:08 crc kubenswrapper[4926]: I1125 19:18:08.479710 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"e4121bf6c26b0a7bae3a6d58d31d57558cb32620b34f91bffebb99fb65bd481b"} Nov 25 19:20:33 crc kubenswrapper[4926]: I1125 19:20:33.541831 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:20:33 crc kubenswrapper[4926]: I1125 19:20:33.542563 
4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:21:03 crc kubenswrapper[4926]: I1125 19:21:03.541599 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:21:03 crc kubenswrapper[4926]: I1125 19:21:03.542227 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:21:33 crc kubenswrapper[4926]: I1125 19:21:33.541784 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:21:33 crc kubenswrapper[4926]: I1125 19:21:33.542417 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:21:33 crc kubenswrapper[4926]: I1125 19:21:33.542471 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:21:33 crc kubenswrapper[4926]: I1125 19:21:33.543046 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4121bf6c26b0a7bae3a6d58d31d57558cb32620b34f91bffebb99fb65bd481b"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:21:33 crc kubenswrapper[4926]: I1125 19:21:33.543106 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://e4121bf6c26b0a7bae3a6d58d31d57558cb32620b34f91bffebb99fb65bd481b" gracePeriod=600 Nov 25 19:21:34 crc kubenswrapper[4926]: I1125 19:21:34.725484 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="e4121bf6c26b0a7bae3a6d58d31d57558cb32620b34f91bffebb99fb65bd481b" exitCode=0 Nov 25 19:21:34 crc kubenswrapper[4926]: I1125 19:21:34.725577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"e4121bf6c26b0a7bae3a6d58d31d57558cb32620b34f91bffebb99fb65bd481b"} Nov 25 19:21:34 crc kubenswrapper[4926]: I1125 19:21:34.726137 4926 kubelet.go:2453] "SyncLoop (PLEG): 
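
The repeated prober lines above are kubelet's HTTP liveness check: a GET against http://127.0.0.1:8798/health that fails with "connection refused" because nothing is listening on that port while the daemon is down. Below is a minimal standalone sketch of the same check in Go; it is illustrative only, not kubelet's prober package (kubelet treats any 2xx/3xx status as probe success, which the sketch mirrors).

// probecheck.go - standalone sketch of the HTTP liveness check reported
// by the prober lines above; endpoint and 1s timeout mirror the log.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// "connect: connection refused" surfaces here when no listener is up.
		fmt.Printf("Probe failed: %v\n", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("Probe succeeded:", resp.Status)
	} else {
		fmt.Println("Probe failed:", resp.Status)
	}
}
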
event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"} Nov 25 19:21:34 crc kubenswrapper[4926]: I1125 19:21:34.726165 4926 scope.go:117] "RemoveContainer" containerID="f56183be6f55958344a722947a9b9bc71fe4ec26bb53351c9829702938691dd1" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.355019 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c4tsv"] Nov 25 19:22:08 crc kubenswrapper[4926]: E1125 19:22:08.358116 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b88141-c093-48d6-9e52-6cd42570c8dc" containerName="collect-profiles" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.358139 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b88141-c093-48d6-9e52-6cd42570c8dc" containerName="collect-profiles" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.358393 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="46b88141-c093-48d6-9e52-6cd42570c8dc" containerName="collect-profiles" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.360530 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.367271 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c4tsv"] Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.375830 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frjc8\" (UniqueName: \"kubernetes.io/projected/a34ae9ca-4096-4f54-b664-729d6e373c85-kube-api-access-frjc8\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.376172 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-utilities\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.376697 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-catalog-content\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.478149 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-utilities\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.478503 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-catalog-content\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " 
pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.478608 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frjc8\" (UniqueName: \"kubernetes.io/projected/a34ae9ca-4096-4f54-b664-729d6e373c85-kube-api-access-frjc8\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.478840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-catalog-content\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.478926 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-utilities\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.507422 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frjc8\" (UniqueName: \"kubernetes.io/projected/a34ae9ca-4096-4f54-b664-729d6e373c85-kube-api-access-frjc8\") pod \"community-operators-c4tsv\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") " pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:08 crc kubenswrapper[4926]: I1125 19:22:08.698266 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:09 crc kubenswrapper[4926]: I1125 19:22:09.277824 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c4tsv"] Nov 25 19:22:10 crc kubenswrapper[4926]: I1125 19:22:10.190592 4926 generic.go:334] "Generic (PLEG): container finished" podID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerID="6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c" exitCode=0 Nov 25 19:22:10 crc kubenswrapper[4926]: I1125 19:22:10.190735 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerDied","Data":"6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c"} Nov 25 19:22:10 crc kubenswrapper[4926]: I1125 19:22:10.191454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerStarted","Data":"8b1a8df7097554305c893792e45c3d8b5801b45ac2e0915d62f326d48752b417"} Nov 25 19:22:10 crc kubenswrapper[4926]: I1125 19:22:10.195418 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 19:22:11 crc kubenswrapper[4926]: I1125 19:22:11.203101 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerStarted","Data":"2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205"} Nov 25 19:22:13 crc kubenswrapper[4926]: I1125 19:22:13.226496 4926 generic.go:334] "Generic (PLEG): container finished" podID="a34ae9ca-4096-4f54-b664-729d6e373c85" 
containerID="2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205" exitCode=0 Nov 25 19:22:13 crc kubenswrapper[4926]: I1125 19:22:13.226598 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerDied","Data":"2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205"} Nov 25 19:22:14 crc kubenswrapper[4926]: I1125 19:22:14.240256 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerStarted","Data":"45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7"} Nov 25 19:22:14 crc kubenswrapper[4926]: I1125 19:22:14.264561 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c4tsv" podStartSLOduration=2.807300491 podStartE2EDuration="6.264527695s" podCreationTimestamp="2025-11-25 19:22:08 +0000 UTC" firstStartedPulling="2025-11-25 19:22:10.195072994 +0000 UTC m=+4160.580586609" lastFinishedPulling="2025-11-25 19:22:13.652300198 +0000 UTC m=+4164.037813813" observedRunningTime="2025-11-25 19:22:14.257265068 +0000 UTC m=+4164.642778683" watchObservedRunningTime="2025-11-25 19:22:14.264527695 +0000 UTC m=+4164.650041300" Nov 25 19:22:18 crc kubenswrapper[4926]: I1125 19:22:18.698813 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:18 crc kubenswrapper[4926]: I1125 19:22:18.699307 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:18 crc kubenswrapper[4926]: I1125 19:22:18.779453 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:19 crc kubenswrapper[4926]: I1125 19:22:19.388722 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c4tsv" Nov 25 19:22:19 crc kubenswrapper[4926]: I1125 19:22:19.469470 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c4tsv"] Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.325504 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c4tsv" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="registry-server" containerID="cri-o://45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7" gracePeriod=2 Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.884368 4926 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.938919 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-catalog-content\") pod \"a34ae9ca-4096-4f54-b664-729d6e373c85\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") "
Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.939034 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frjc8\" (UniqueName: \"kubernetes.io/projected/a34ae9ca-4096-4f54-b664-729d6e373c85-kube-api-access-frjc8\") pod \"a34ae9ca-4096-4f54-b664-729d6e373c85\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") "
Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.939332 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-utilities\") pod \"a34ae9ca-4096-4f54-b664-729d6e373c85\" (UID: \"a34ae9ca-4096-4f54-b664-729d6e373c85\") "
Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.940557 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-utilities" (OuterVolumeSpecName: "utilities") pod "a34ae9ca-4096-4f54-b664-729d6e373c85" (UID: "a34ae9ca-4096-4f54-b664-729d6e373c85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.945963 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a34ae9ca-4096-4f54-b664-729d6e373c85-kube-api-access-frjc8" (OuterVolumeSpecName: "kube-api-access-frjc8") pod "a34ae9ca-4096-4f54-b664-729d6e373c85" (UID: "a34ae9ca-4096-4f54-b664-729d6e373c85"). InnerVolumeSpecName "kube-api-access-frjc8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:22:21 crc kubenswrapper[4926]: I1125 19:22:21.997044 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a34ae9ca-4096-4f54-b664-729d6e373c85" (UID: "a34ae9ca-4096-4f54-b664-729d6e373c85"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.042612 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.042645 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a34ae9ca-4096-4f54-b664-729d6e373c85-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.042658 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frjc8\" (UniqueName: \"kubernetes.io/projected/a34ae9ca-4096-4f54-b664-729d6e373c85-kube-api-access-frjc8\") on node \"crc\" DevicePath \"\""
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.343703 4926 generic.go:334] "Generic (PLEG): container finished" podID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerID="45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7" exitCode=0
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.345595 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4tsv"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.350425 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerDied","Data":"45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7"}
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.350500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4tsv" event={"ID":"a34ae9ca-4096-4f54-b664-729d6e373c85","Type":"ContainerDied","Data":"8b1a8df7097554305c893792e45c3d8b5801b45ac2e0915d62f326d48752b417"}
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.350534 4926 scope.go:117] "RemoveContainer" containerID="45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.388243 4926 scope.go:117] "RemoveContainer" containerID="2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.422143 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c4tsv"]
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.435955 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c4tsv"]
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.437775 4926 scope.go:117] "RemoveContainer" containerID="6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.509249 4926 scope.go:117] "RemoveContainer" containerID="45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7"
Nov 25 19:22:22 crc kubenswrapper[4926]: E1125 19:22:22.510267 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7\": container with ID starting with 45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7 not found: ID does not exist" containerID="45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.510359 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7"} err="failed to get container status \"45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7\": rpc error: code = NotFound desc = could not find container \"45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7\": container with ID starting with 45f1dc9c0548a86ecea8e72a64b89bb63066712277bd67d889dc3fceba6a21e7 not found: ID does not exist"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.510426 4926 scope.go:117] "RemoveContainer" containerID="2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205"
Nov 25 19:22:22 crc kubenswrapper[4926]: E1125 19:22:22.510958 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205\": container with ID starting with 2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205 not found: ID does not exist" containerID="2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.510998 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205"} err="failed to get container status \"2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205\": rpc error: code = NotFound desc = could not find container \"2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205\": container with ID starting with 2850d03eb9195597fbad60058c0a49b3d0bca93ba8aa8620ce78cebafd05e205 not found: ID does not exist"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.511026 4926 scope.go:117] "RemoveContainer" containerID="6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c"
Nov 25 19:22:22 crc kubenswrapper[4926]: E1125 19:22:22.511334 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c\": container with ID starting with 6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c not found: ID does not exist" containerID="6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c"
Nov 25 19:22:22 crc kubenswrapper[4926]: I1125 19:22:22.511408 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c"} err="failed to get container status \"6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c\": rpc error: code = NotFound desc = could not find container \"6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c\": container with ID starting with 6d8a08e02fa569b259e75a1aaf03c86579bc048cfb68760786117ec6ac190e0c not found: ID does not exist"
Nov 25 19:22:24 crc kubenswrapper[4926]: I1125 19:22:24.345149 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" path="/var/lib/kubelet/pods/a34ae9ca-4096-4f54-b664-729d6e373c85/volumes"
Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.640719 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kgswc"]
Nov 25 19:23:35 crc kubenswrapper[4926]: E1125 19:23:35.641943 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="extract-utilities"
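
The NotFound errors that follow a successful pod removal above are benign: the kubelet asks the runtime for the status of container IDs CRI-O has already deleted, and an "ID does not exist" answer is treated as "already gone," so cleanup still completes. A sketch of that idempotent-removal pattern follows; the runtime interface here is hypothetical, for illustration, not the real CRI client.

// removal.go - sketch of the idempotent-removal pattern behind the
// "DeleteContainer returned error ... NotFound" lines above.
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the gRPC NotFound code in the real CRI calls.
var errNotFound = errors.New("container not found")

type runtime interface {
	RemoveContainer(id string) error
}

// removeIdempotent treats "already gone" as success, which is why the log
// records the errors yet the pod's containers are still cleaned up.
func removeIdempotent(r runtime, id string) error {
	if err := r.RemoveContainer(id); err != nil {
		if errors.Is(err, errNotFound) {
			fmt.Printf("container %s already removed, ignoring\n", id)
			return nil
		}
		return err
	}
	return nil
}

type fakeRuntime struct{}

func (fakeRuntime) RemoveContainer(id string) error { return errNotFound }

func main() {
	if err := removeIdempotent(fakeRuntime{}, "45f1dc9c0548"); err != nil {
		fmt.Println("unexpected:", err)
	}
}
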
podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="extract-utilities" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.642482 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="extract-utilities" Nov 25 19:23:35 crc kubenswrapper[4926]: E1125 19:23:35.642531 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="registry-server" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.642546 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="registry-server" Nov 25 19:23:35 crc kubenswrapper[4926]: E1125 19:23:35.642650 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="extract-content" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.642665 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="extract-content" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.646268 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a34ae9ca-4096-4f54-b664-729d6e373c85" containerName="registry-server" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.656874 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.688569 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kgswc"] Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.795067 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtd5p\" (UniqueName: \"kubernetes.io/projected/6babbde3-7bc4-4243-86ea-ca348fd19359-kube-api-access-xtd5p\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.795136 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-catalog-content\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.795539 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-utilities\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.898209 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtd5p\" (UniqueName: \"kubernetes.io/projected/6babbde3-7bc4-4243-86ea-ca348fd19359-kube-api-access-xtd5p\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.898285 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-catalog-content\") pod \"redhat-operators-kgswc\" 
(UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.898842 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-catalog-content\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.899035 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-utilities\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.899444 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-utilities\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:35 crc kubenswrapper[4926]: I1125 19:23:35.922866 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtd5p\" (UniqueName: \"kubernetes.io/projected/6babbde3-7bc4-4243-86ea-ca348fd19359-kube-api-access-xtd5p\") pod \"redhat-operators-kgswc\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:36 crc kubenswrapper[4926]: I1125 19:23:36.003429 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:36 crc kubenswrapper[4926]: I1125 19:23:36.526955 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kgswc"] Nov 25 19:23:37 crc kubenswrapper[4926]: I1125 19:23:37.317072 4926 generic.go:334] "Generic (PLEG): container finished" podID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerID="bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01" exitCode=0 Nov 25 19:23:37 crc kubenswrapper[4926]: I1125 19:23:37.317130 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerDied","Data":"bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01"} Nov 25 19:23:37 crc kubenswrapper[4926]: I1125 19:23:37.317391 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerStarted","Data":"faa8c2ded1338be1f4f9e052389d1b4bc90389136a5d6b38af800e877d39b5a6"} Nov 25 19:23:38 crc kubenswrapper[4926]: I1125 19:23:38.364073 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerStarted","Data":"c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35"} Nov 25 19:23:52 crc kubenswrapper[4926]: I1125 19:23:52.530570 4926 generic.go:334] "Generic (PLEG): container finished" podID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerID="c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35" exitCode=0 Nov 25 19:23:52 crc kubenswrapper[4926]: I1125 19:23:52.530672 4926 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerDied","Data":"c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35"} Nov 25 19:23:53 crc kubenswrapper[4926]: I1125 19:23:53.543919 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerStarted","Data":"cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610"} Nov 25 19:23:56 crc kubenswrapper[4926]: I1125 19:23:56.004418 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:56 crc kubenswrapper[4926]: I1125 19:23:56.005936 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:23:57 crc kubenswrapper[4926]: I1125 19:23:57.098526 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kgswc" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="registry-server" probeResult="failure" output=< Nov 25 19:23:57 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 19:23:57 crc kubenswrapper[4926]: > Nov 25 19:24:03 crc kubenswrapper[4926]: I1125 19:24:03.541682 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:24:03 crc kubenswrapper[4926]: I1125 19:24:03.542464 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:24:06 crc kubenswrapper[4926]: I1125 19:24:06.080542 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:24:06 crc kubenswrapper[4926]: I1125 19:24:06.102447 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kgswc" podStartSLOduration=15.252288057 podStartE2EDuration="31.102426684s" podCreationTimestamp="2025-11-25 19:23:35 +0000 UTC" firstStartedPulling="2025-11-25 19:23:37.319627654 +0000 UTC m=+4247.705141289" lastFinishedPulling="2025-11-25 19:23:53.169766271 +0000 UTC m=+4263.555279916" observedRunningTime="2025-11-25 19:23:53.580136329 +0000 UTC m=+4263.965649984" watchObservedRunningTime="2025-11-25 19:24:06.102426684 +0000 UTC m=+4276.487940289" Nov 25 19:24:06 crc kubenswrapper[4926]: I1125 19:24:06.142770 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:24:06 crc kubenswrapper[4926]: I1125 19:24:06.827442 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kgswc"] Nov 25 19:24:07 crc kubenswrapper[4926]: I1125 19:24:07.687207 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kgswc" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="registry-server" 
containerID="cri-o://cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610" gracePeriod=2 Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.148868 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.281812 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-catalog-content\") pod \"6babbde3-7bc4-4243-86ea-ca348fd19359\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.281878 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-utilities\") pod \"6babbde3-7bc4-4243-86ea-ca348fd19359\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.282060 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtd5p\" (UniqueName: \"kubernetes.io/projected/6babbde3-7bc4-4243-86ea-ca348fd19359-kube-api-access-xtd5p\") pod \"6babbde3-7bc4-4243-86ea-ca348fd19359\" (UID: \"6babbde3-7bc4-4243-86ea-ca348fd19359\") " Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.282853 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-utilities" (OuterVolumeSpecName: "utilities") pod "6babbde3-7bc4-4243-86ea-ca348fd19359" (UID: "6babbde3-7bc4-4243-86ea-ca348fd19359"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.306051 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6babbde3-7bc4-4243-86ea-ca348fd19359-kube-api-access-xtd5p" (OuterVolumeSpecName: "kube-api-access-xtd5p") pod "6babbde3-7bc4-4243-86ea-ca348fd19359" (UID: "6babbde3-7bc4-4243-86ea-ca348fd19359"). InnerVolumeSpecName "kube-api-access-xtd5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.385333 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.385396 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtd5p\" (UniqueName: \"kubernetes.io/projected/6babbde3-7bc4-4243-86ea-ca348fd19359-kube-api-access-xtd5p\") on node \"crc\" DevicePath \"\"" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.420546 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6babbde3-7bc4-4243-86ea-ca348fd19359" (UID: "6babbde3-7bc4-4243-86ea-ca348fd19359"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.487154 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6babbde3-7bc4-4243-86ea-ca348fd19359-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.698361 4926 generic.go:334] "Generic (PLEG): container finished" podID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerID="cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610" exitCode=0 Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.698425 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerDied","Data":"cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610"} Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.698457 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgswc" event={"ID":"6babbde3-7bc4-4243-86ea-ca348fd19359","Type":"ContainerDied","Data":"faa8c2ded1338be1f4f9e052389d1b4bc90389136a5d6b38af800e877d39b5a6"} Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.698477 4926 scope.go:117] "RemoveContainer" containerID="cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.698592 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kgswc" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.731959 4926 scope.go:117] "RemoveContainer" containerID="c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.740957 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kgswc"] Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.752436 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kgswc"] Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.775594 4926 scope.go:117] "RemoveContainer" containerID="bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.853951 4926 scope.go:117] "RemoveContainer" containerID="cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610" Nov 25 19:24:08 crc kubenswrapper[4926]: E1125 19:24:08.854895 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610\": container with ID starting with cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610 not found: ID does not exist" containerID="cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.854953 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610"} err="failed to get container status \"cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610\": rpc error: code = NotFound desc = could not find container \"cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610\": container with ID starting with cd2098f5fbae55db4763a308bcc7f0cb56de99d33ddfff5cf5592674d9120610 not found: ID does not exist" Nov 25 19:24:08 crc 
kubenswrapper[4926]: I1125 19:24:08.854987 4926 scope.go:117] "RemoveContainer" containerID="c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35" Nov 25 19:24:08 crc kubenswrapper[4926]: E1125 19:24:08.855610 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35\": container with ID starting with c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35 not found: ID does not exist" containerID="c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.855644 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35"} err="failed to get container status \"c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35\": rpc error: code = NotFound desc = could not find container \"c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35\": container with ID starting with c61e23cc2535a581f488404dbaf0e49d7c391ca94499c71edd03ad5c9de8fa35 not found: ID does not exist" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.855668 4926 scope.go:117] "RemoveContainer" containerID="bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01" Nov 25 19:24:08 crc kubenswrapper[4926]: E1125 19:24:08.855964 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01\": container with ID starting with bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01 not found: ID does not exist" containerID="bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01" Nov 25 19:24:08 crc kubenswrapper[4926]: I1125 19:24:08.856007 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01"} err="failed to get container status \"bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01\": rpc error: code = NotFound desc = could not find container \"bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01\": container with ID starting with bc19e616ac15f99f32f876e7cc863a90729c3f908e6f238a2952dc99e0370a01 not found: ID does not exist" Nov 25 19:24:10 crc kubenswrapper[4926]: I1125 19:24:10.347642 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" path="/var/lib/kubelet/pods/6babbde3-7bc4-4243-86ea-ca348fd19359/volumes" Nov 25 19:24:33 crc kubenswrapper[4926]: I1125 19:24:33.541507 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:24:33 crc kubenswrapper[4926]: I1125 19:24:33.542341 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.541888 4926 patch_prober.go:28] interesting 
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.542548 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.542598 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.543511 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.543589 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" gracePeriod=600
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.617808 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zftgt"]
Nov 25 19:25:03 crc kubenswrapper[4926]: E1125 19:25:03.618462 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="extract-content"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.618496 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="extract-content"
Nov 25 19:25:03 crc kubenswrapper[4926]: E1125 19:25:03.618526 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="extract-utilities"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.618541 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="extract-utilities"
Nov 25 19:25:03 crc kubenswrapper[4926]: E1125 19:25:03.618599 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="registry-server"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.618612 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="registry-server"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.618990 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6babbde3-7bc4-4243-86ea-ca348fd19359" containerName="registry-server"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.621866 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.638815 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zftgt"]
Nov 25 19:25:03 crc kubenswrapper[4926]: E1125 19:25:03.706072 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.784154 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkmfc\" (UniqueName: \"kubernetes.io/projected/8c7a9c4f-fe15-403f-a678-2f8b693919ff-kube-api-access-kkmfc\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.784401 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-catalog-content\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.784461 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-utilities\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.886984 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkmfc\" (UniqueName: \"kubernetes.io/projected/8c7a9c4f-fe15-403f-a678-2f8b693919ff-kube-api-access-kkmfc\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.887097 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-catalog-content\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.887125 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-utilities\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.887895 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-utilities\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.887890 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-catalog-content\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:03 crc kubenswrapper[4926]: I1125 19:25:03.921181 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkmfc\" (UniqueName: \"kubernetes.io/projected/8c7a9c4f-fe15-403f-a678-2f8b693919ff-kube-api-access-kkmfc\") pod \"certified-operators-zftgt\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") " pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:04 crc kubenswrapper[4926]: I1125 19:25:04.029050 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:04 crc kubenswrapper[4926]: I1125 19:25:04.345819 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" exitCode=0
Nov 25 19:25:04 crc kubenswrapper[4926]: I1125 19:25:04.346311 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"}
Nov 25 19:25:04 crc kubenswrapper[4926]: I1125 19:25:04.346418 4926 scope.go:117] "RemoveContainer" containerID="e4121bf6c26b0a7bae3a6d58d31d57558cb32620b34f91bffebb99fb65bd481b"
Nov 25 19:25:04 crc kubenswrapper[4926]: I1125 19:25:04.347701 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:25:04 crc kubenswrapper[4926]: E1125 19:25:04.348275 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:25:04 crc kubenswrapper[4926]: I1125 19:25:04.514642 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zftgt"]
Nov 25 19:25:05 crc kubenswrapper[4926]: I1125 19:25:05.364663 4926 generic.go:334] "Generic (PLEG): container finished" podID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerID="68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2" exitCode=0
Nov 25 19:25:05 crc kubenswrapper[4926]: I1125 19:25:05.364732 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerDied","Data":"68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2"}
Nov 25 19:25:05 crc kubenswrapper[4926]: I1125 19:25:05.365059 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerStarted","Data":"c6031211308e12e9e1b2ee9207c09918c7f6b63716e6476e127aac44b6a4cc6e"}
Nov 25 19:25:07 crc kubenswrapper[4926]: I1125 19:25:07.391811 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerStarted","Data":"9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e"}
Nov 25 19:25:08 crc kubenswrapper[4926]: I1125 19:25:08.405015 4926 generic.go:334] "Generic (PLEG): container finished" podID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerID="9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e" exitCode=0
Nov 25 19:25:08 crc kubenswrapper[4926]: I1125 19:25:08.405069 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerDied","Data":"9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e"}
Nov 25 19:25:09 crc kubenswrapper[4926]: I1125 19:25:09.427210 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerStarted","Data":"0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b"}
Nov 25 19:25:09 crc kubenswrapper[4926]: I1125 19:25:09.467730 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zftgt" podStartSLOduration=3.004559813 podStartE2EDuration="6.467709033s" podCreationTimestamp="2025-11-25 19:25:03 +0000 UTC" firstStartedPulling="2025-11-25 19:25:05.366618473 +0000 UTC m=+4335.752132088" lastFinishedPulling="2025-11-25 19:25:08.829767673 +0000 UTC m=+4339.215281308" observedRunningTime="2025-11-25 19:25:09.453036333 +0000 UTC m=+4339.838549978" watchObservedRunningTime="2025-11-25 19:25:09.467709033 +0000 UTC m=+4339.853222638"
Nov 25 19:25:14 crc kubenswrapper[4926]: I1125 19:25:14.029413 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:14 crc kubenswrapper[4926]: I1125 19:25:14.031192 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:14 crc kubenswrapper[4926]: I1125 19:25:14.085816 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:14 crc kubenswrapper[4926]: I1125 19:25:14.530899 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:14 crc kubenswrapper[4926]: I1125 19:25:14.574545 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zftgt"]
Nov 25 19:25:16 crc kubenswrapper[4926]: I1125 19:25:16.515679 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zftgt" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="registry-server" containerID="cri-o://0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b" gracePeriod=2
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.046927 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.218450 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-utilities\") pod \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") "
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.218527 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkmfc\" (UniqueName: \"kubernetes.io/projected/8c7a9c4f-fe15-403f-a678-2f8b693919ff-kube-api-access-kkmfc\") pod \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") "
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.218850 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-catalog-content\") pod \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\" (UID: \"8c7a9c4f-fe15-403f-a678-2f8b693919ff\") "
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.220497 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-utilities" (OuterVolumeSpecName: "utilities") pod "8c7a9c4f-fe15-403f-a678-2f8b693919ff" (UID: "8c7a9c4f-fe15-403f-a678-2f8b693919ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.225422 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c7a9c4f-fe15-403f-a678-2f8b693919ff-kube-api-access-kkmfc" (OuterVolumeSpecName: "kube-api-access-kkmfc") pod "8c7a9c4f-fe15-403f-a678-2f8b693919ff" (UID: "8c7a9c4f-fe15-403f-a678-2f8b693919ff"). InnerVolumeSpecName "kube-api-access-kkmfc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.321339 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.321389 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkmfc\" (UniqueName: \"kubernetes.io/projected/8c7a9c4f-fe15-403f-a678-2f8b693919ff-kube-api-access-kkmfc\") on node \"crc\" DevicePath \"\""
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.526407 4926 generic.go:334] "Generic (PLEG): container finished" podID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerID="0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b" exitCode=0
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.526666 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zftgt"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.526655 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerDied","Data":"0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b"}
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.527027 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zftgt" event={"ID":"8c7a9c4f-fe15-403f-a678-2f8b693919ff","Type":"ContainerDied","Data":"c6031211308e12e9e1b2ee9207c09918c7f6b63716e6476e127aac44b6a4cc6e"}
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.527076 4926 scope.go:117] "RemoveContainer" containerID="0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.560270 4926 scope.go:117] "RemoveContainer" containerID="9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.588203 4926 scope.go:117] "RemoveContainer" containerID="68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.692950 4926 scope.go:117] "RemoveContainer" containerID="0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b"
Nov 25 19:25:17 crc kubenswrapper[4926]: E1125 19:25:17.693537 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b\": container with ID starting with 0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b not found: ID does not exist" containerID="0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.693614 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b"} err="failed to get container status \"0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b\": rpc error: code = NotFound desc = could not find container \"0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b\": container with ID starting with 0a1d6ff87be73a1dfe64565dbd16dc541a69c559f417d6821292ad5de25d6f6b not found: ID does not exist"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.693668 4926 scope.go:117] "RemoveContainer" containerID="9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e"
Nov 25 19:25:17 crc kubenswrapper[4926]: E1125 19:25:17.694752 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e\": container with ID starting with 9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e not found: ID does not exist" containerID="9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e"
Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.694788 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e"} err="failed to get container status \"9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e\": rpc error: code = NotFound desc = could not find container 
\"9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e\": container with ID starting with 9244a60035937d9b6a28034f9501a9e3ce4302ad84c6d1295f332e6deaf2482e not found: ID does not exist" Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.694809 4926 scope.go:117] "RemoveContainer" containerID="68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2" Nov 25 19:25:17 crc kubenswrapper[4926]: E1125 19:25:17.695217 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2\": container with ID starting with 68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2 not found: ID does not exist" containerID="68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2" Nov 25 19:25:17 crc kubenswrapper[4926]: I1125 19:25:17.695238 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2"} err="failed to get container status \"68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2\": rpc error: code = NotFound desc = could not find container \"68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2\": container with ID starting with 68df2d90b65d549091e9167dd4c5b1b35a89a52942e8a1a474f1056ac8da85a2 not found: ID does not exist" Nov 25 19:25:18 crc kubenswrapper[4926]: I1125 19:25:18.060832 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8c7a9c4f-fe15-403f-a678-2f8b693919ff" (UID: "8c7a9c4f-fe15-403f-a678-2f8b693919ff"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:25:18 crc kubenswrapper[4926]: I1125 19:25:18.137215 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7a9c4f-fe15-403f-a678-2f8b693919ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:25:18 crc kubenswrapper[4926]: I1125 19:25:18.204857 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zftgt"] Nov 25 19:25:18 crc kubenswrapper[4926]: I1125 19:25:18.219536 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zftgt"] Nov 25 19:25:18 crc kubenswrapper[4926]: I1125 19:25:18.331896 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:25:18 crc kubenswrapper[4926]: E1125 19:25:18.332355 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:25:18 crc kubenswrapper[4926]: I1125 19:25:18.341342 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" path="/var/lib/kubelet/pods/8c7a9c4f-fe15-403f-a678-2f8b693919ff/volumes" Nov 25 19:25:32 crc kubenswrapper[4926]: I1125 19:25:32.330192 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:25:32 crc kubenswrapper[4926]: E1125 19:25:32.330909 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:25:45 crc kubenswrapper[4926]: I1125 19:25:45.329572 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:25:45 crc kubenswrapper[4926]: E1125 19:25:45.330489 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.553436 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 19:25:47 crc kubenswrapper[4926]: E1125 19:25:47.556216 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="extract-utilities" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.556422 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="extract-utilities" Nov 25 19:25:47 crc kubenswrapper[4926]: E1125 
19:25:47.556594 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="registry-server" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.556714 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="registry-server" Nov 25 19:25:47 crc kubenswrapper[4926]: E1125 19:25:47.556842 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="extract-content" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.556971 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="extract-content" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.557479 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c7a9c4f-fe15-403f-a678-2f8b693919ff" containerName="registry-server" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.559667 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.562262 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.562473 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.577933 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.642428 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.643011 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.744884 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.745061 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:47 crc kubenswrapper[4926]: I1125 19:25:47.745252 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: 
\"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:48 crc kubenswrapper[4926]: I1125 19:25:48.274531 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:48 crc kubenswrapper[4926]: I1125 19:25:48.492552 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:49 crc kubenswrapper[4926]: I1125 19:25:49.010189 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 19:25:49 crc kubenswrapper[4926]: I1125 19:25:49.991925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ebbf7f5f-c5dd-4350-80e5-a149e063ea66","Type":"ContainerStarted","Data":"046233ad4dc854f77b555f2a48676bdf6e8d1f0b51a1b1f67b340f6e25eae7e1"} Nov 25 19:25:49 crc kubenswrapper[4926]: I1125 19:25:49.992272 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ebbf7f5f-c5dd-4350-80e5-a149e063ea66","Type":"ContainerStarted","Data":"16eba6f81387b328f5f116bf56736e3bb99d5eebbf29776ce473136a16275ab8"} Nov 25 19:25:50 crc kubenswrapper[4926]: I1125 19:25:50.015034 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=3.015011251 podStartE2EDuration="3.015011251s" podCreationTimestamp="2025-11-25 19:25:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:25:50.010874628 +0000 UTC m=+4380.396388283" watchObservedRunningTime="2025-11-25 19:25:50.015011251 +0000 UTC m=+4380.400524866" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.018416 4926 generic.go:334] "Generic (PLEG): container finished" podID="ebbf7f5f-c5dd-4350-80e5-a149e063ea66" containerID="046233ad4dc854f77b555f2a48676bdf6e8d1f0b51a1b1f67b340f6e25eae7e1" exitCode=0 Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.018491 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ebbf7f5f-c5dd-4350-80e5-a149e063ea66","Type":"ContainerDied","Data":"046233ad4dc854f77b555f2a48676bdf6e8d1f0b51a1b1f67b340f6e25eae7e1"} Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.347171 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.350556 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.379482 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.450821 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kube-api-access\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.451038 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.451790 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-var-lock\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.555149 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.555518 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.555708 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-var-lock\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.555834 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-var-lock\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.555905 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kube-api-access\") pod \"installer-9-crc\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.582007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:52 crc kubenswrapper[4926]: I1125 19:25:52.680867 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.190782 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.382463 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.472477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kube-api-access\") pod \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.472804 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kubelet-dir\") pod \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\" (UID: \"ebbf7f5f-c5dd-4350-80e5-a149e063ea66\") " Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.473689 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ebbf7f5f-c5dd-4350-80e5-a149e063ea66" (UID: "ebbf7f5f-c5dd-4350-80e5-a149e063ea66"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.485033 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ebbf7f5f-c5dd-4350-80e5-a149e063ea66" (UID: "ebbf7f5f-c5dd-4350-80e5-a149e063ea66"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.579178 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 19:25:53 crc kubenswrapper[4926]: I1125 19:25:53.579241 4926 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ebbf7f5f-c5dd-4350-80e5-a149e063ea66-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 19:25:54 crc kubenswrapper[4926]: I1125 19:25:54.050352 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 19:25:54 crc kubenswrapper[4926]: I1125 19:25:54.050427 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ebbf7f5f-c5dd-4350-80e5-a149e063ea66","Type":"ContainerDied","Data":"16eba6f81387b328f5f116bf56736e3bb99d5eebbf29776ce473136a16275ab8"} Nov 25 19:25:54 crc kubenswrapper[4926]: I1125 19:25:54.050972 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16eba6f81387b328f5f116bf56736e3bb99d5eebbf29776ce473136a16275ab8" Nov 25 19:25:54 crc kubenswrapper[4926]: I1125 19:25:54.052245 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a","Type":"ContainerStarted","Data":"032d8c991ce9f26691817dd12d124503a820e50b1629698375984f165319ab45"} Nov 25 19:25:54 crc kubenswrapper[4926]: I1125 19:25:54.052294 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a","Type":"ContainerStarted","Data":"4d3afccdcaa299daedc2d83a2ce85d92c34807f972ed87fcd49db6bffbc4b2fb"} Nov 25 19:25:54 crc kubenswrapper[4926]: I1125 19:25:54.078047 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.078016704 podStartE2EDuration="2.078016704s" podCreationTimestamp="2025-11-25 19:25:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:25:54.067216349 +0000 UTC m=+4384.452729994" watchObservedRunningTime="2025-11-25 19:25:54.078016704 +0000 UTC m=+4384.463530339" Nov 25 19:26:00 crc kubenswrapper[4926]: I1125 19:26:00.341613 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:26:00 crc kubenswrapper[4926]: E1125 19:26:00.342679 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:26:12 crc kubenswrapper[4926]: I1125 19:26:12.329501 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:26:12 crc kubenswrapper[4926]: E1125 19:26:12.330795 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:26:24 crc kubenswrapper[4926]: I1125 19:26:24.330054 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:26:24 crc kubenswrapper[4926]: E1125 19:26:24.331291 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.775948 4926 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.776986 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebbf7f5f-c5dd-4350-80e5-a149e063ea66" containerName="pruner" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.777001 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebbf7f5f-c5dd-4350-80e5-a149e063ea66" containerName="pruner" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.777275 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebbf7f5f-c5dd-4350-80e5-a149e063ea66" containerName="pruner" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778022 4926 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778311 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778497 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b" gracePeriod=15 Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778584 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987" gracePeriod=15 Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778662 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba" gracePeriod=15 Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778644 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112" gracePeriod=15 Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.778749 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665" gracePeriod=15 Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.779558 4926 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.780356 4926 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.780560 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.780610 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.780627 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.780657 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.780674 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.780719 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.780734 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.780757 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.780774 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.780821 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.780838 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.781297 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.781351 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.781424 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.781451 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.781478 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.781963 4926 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.781986 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.782690 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845337 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845461 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845544 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845616 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845776 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845834 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845902 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.845961 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: E1125 19:26:31.886636 4926 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947500 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947652 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947703 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947754 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947801 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947837 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.947926 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948034 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948082 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948113 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948138 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948161 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948188 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948214 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:31 crc kubenswrapper[4926]: I1125 19:26:31.948242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.187766 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:32 crc kubenswrapper[4926]: E1125 19:26:32.252252 4926 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.212:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b568144d36f5a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 19:26:32.251158362 +0000 UTC m=+4422.636672007,LastTimestamp:2025-11-25 19:26:32.251158362 +0000 UTC m=+4422.636672007,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.579420 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"99170b0e1a30f4dbc7b506870f88e90fc56166cf45c699c6bf83182118cb9ccf"} Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.583328 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.585148 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.587389 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665" exitCode=0 Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.587419 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987" exitCode=0 Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.587434 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112" exitCode=0 Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.587444 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba" exitCode=2 Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.587517 4926 scope.go:117] "RemoveContainer" containerID="62922126354ebd2c10d6ed103269a828ad41cf970dafe3b89b351f7b56428199" Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.590093 4926 generic.go:334] "Generic (PLEG): container finished" podID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" containerID="032d8c991ce9f26691817dd12d124503a820e50b1629698375984f165319ab45" 
exitCode=0 Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.590158 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a","Type":"ContainerDied","Data":"032d8c991ce9f26691817dd12d124503a820e50b1629698375984f165319ab45"} Nov 25 19:26:32 crc kubenswrapper[4926]: I1125 19:26:32.591222 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:33 crc kubenswrapper[4926]: E1125 19:26:33.369638 4926 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/persistence-rabbitmq-cell1-server-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/persistence-rabbitmq-cell1-server-0\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openstack/rabbitmq-cell1-server-0" volumeName="persistence" Nov 25 19:26:33 crc kubenswrapper[4926]: I1125 19:26:33.601031 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"3d6b720a9501bb6cfb724cb864d4f20835ad4dfe5be7ee8dde81357134684ff5"} Nov 25 19:26:33 crc kubenswrapper[4926]: I1125 19:26:33.601879 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:33 crc kubenswrapper[4926]: E1125 19:26:33.602112 4926 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:26:33 crc kubenswrapper[4926]: I1125 19:26:33.606745 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.116850 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.117669 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.211677 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kube-api-access\") pod \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") "
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.211757 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kubelet-dir\") pod \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") "
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.212000 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-var-lock\") pod \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\" (UID: \"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a\") "
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.212489 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-var-lock" (OuterVolumeSpecName: "var-lock") pod "7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" (UID: "7b1a7cbf-373e-4fe0-9cec-7ff7def8739a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.215527 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" (UID: "7b1a7cbf-373e-4fe0-9cec-7ff7def8739a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.217763 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" (UID: "7b1a7cbf-373e-4fe0-9cec-7ff7def8739a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.313397 4926 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-var-lock\") on node \"crc\" DevicePath \"\""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.313661 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.313675 4926 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7b1a7cbf-373e-4fe0-9cec-7ff7def8739a-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.412605 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.413872 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.414934 4926 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.415586 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.519476 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.519599 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.519673 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.521001 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.521095 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.521160 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.521522 4926 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.521567 4926 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.623741 4926 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.625898 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.626977 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b" exitCode=0
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.627077 4926 scope.go:117] "RemoveContainer" containerID="d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.627276 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.632093 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7b1a7cbf-373e-4fe0-9cec-7ff7def8739a","Type":"ContainerDied","Data":"4d3afccdcaa299daedc2d83a2ce85d92c34807f972ed87fcd49db6bffbc4b2fb"}
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.632159 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d3afccdcaa299daedc2d83a2ce85d92c34807f972ed87fcd49db6bffbc4b2fb"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.632124 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.633310 4926 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.640090 4926 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.640982 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.651925 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.652499 4926 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.668280 4926 scope.go:117] "RemoveContainer" containerID="74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.701283 4926 scope.go:117] "RemoveContainer" containerID="0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.755335 4926 scope.go:117] "RemoveContainer" containerID="c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.794788 4926 scope.go:117] "RemoveContainer" containerID="803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.831863 4926 scope.go:117] "RemoveContainer" containerID="a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.872730 4926 scope.go:117] "RemoveContainer" containerID="d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.874552 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\": container with ID starting with d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665 not found: ID does not exist" containerID="d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.874598 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665"} err="failed to get container status \"d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\": rpc error: code = NotFound desc = could not find container \"d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665\": container with ID starting with d9ae376a91d7ef72dccb3f2d58da166f479e15309c6d450095bbd3ee7d157665 not found: ID does not exist"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.874625 4926 scope.go:117] "RemoveContainer" containerID="74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.875172 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\": container with ID starting with 74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987 not found: ID does not exist" containerID="74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.875203 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987"} err="failed to get container status \"74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\": rpc error: code = NotFound desc = could not find container \"74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987\": container with ID starting with 74cc92e5dc189fe8dcb56ac981f790ab425987a3f84d50cabd4fd435cfab5987 not found: ID does not exist"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.875222 4926 scope.go:117] "RemoveContainer" containerID="0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.876112 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\": container with ID starting with 0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112 not found: ID does not exist" containerID="0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.876139 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112"} err="failed to get container status \"0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\": rpc error: code = NotFound desc = could not find container \"0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112\": container with ID starting with 0dba5e801bc796298b52e385d1953591b2719350255d0fef7dcf19f9ff7da112 not found: ID does not exist"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.876156 4926 scope.go:117] "RemoveContainer" containerID="c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.876955 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\": container with ID starting with c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba not found: ID does not exist" containerID="c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.876986 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba"} err="failed to get container status \"c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\": rpc error: code = NotFound desc = could not find container \"c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba\": container with ID starting with c84feddecdc5817baabef017193eb27a2cc26eb90c0b03322ca781cd677c57ba not found: ID does not exist"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.877005 4926 scope.go:117] "RemoveContainer" containerID="803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.877487 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\": container with ID starting with 803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b not found: ID does not exist" containerID="803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.877512 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b"} err="failed to get container status \"803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\": rpc error: code = NotFound desc = could not find container \"803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b\": container with ID starting with 803a252f0689831052a4bb6a3babb4e5bd6454d9f137473605febd59ae5edb9b not found: ID does not exist"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.877529 4926 scope.go:117] "RemoveContainer" containerID="a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6"
Nov 25 19:26:34 crc kubenswrapper[4926]: E1125 19:26:34.877832 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\": container with ID starting with a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6 not found: ID does not exist" containerID="a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6"
Nov 25 19:26:34 crc kubenswrapper[4926]: I1125 19:26:34.877852 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6"} err="failed to get container status \"a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\": rpc error: code = NotFound desc = could not find container \"a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6\": container with ID starting with a9c8886f694fff9d2dec95c2a2a04a7ed6401e68036a5d1dfaf23b16c4037aa6 not found: ID does not exist"
Nov 25 19:26:35 crc kubenswrapper[4926]: I1125 19:26:35.329649 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:26:35 crc kubenswrapper[4926]: E1125 19:26:35.331251 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:26:35 crc kubenswrapper[4926]: E1125 19:26:35.422829 4926 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/tempest-tests-tempest-0-19825: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/tempest-tests-tempest-0-19825\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openstack/tempest-tests-tempest" volumeName="test-operator-logs" Nov 25 19:26:36 crc kubenswrapper[4926]: I1125 19:26:36.351765 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 19:26:36 crc kubenswrapper[4926]: I1125 19:26:36.890987 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 19:26:38 crc kubenswrapper[4926]: E1125 19:26:38.396087 4926 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/prometheus-metric-storage-db-prometheus-metric-storage-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/prometheus-metric-storage-db-prometheus-metric-storage-0\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openstack/prometheus-metric-storage-0" volumeName="prometheus-metric-storage-db" Nov 25 19:26:38 crc kubenswrapper[4926]: E1125 19:26:38.997197 4926 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.212:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b568144d36f5a openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 19:26:32.251158362 +0000 UTC m=+4422.636672007,LastTimestamp:2025-11-25 19:26:32.251158362 +0000 UTC m=+4422.636672007,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 19:26:40 crc kubenswrapper[4926]: I1125 19:26:40.344868 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.472973 4926 
controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.473848 4926 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.474267 4926 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.475761 4926 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.476780 4926 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:41 crc kubenswrapper[4926]: I1125 19:26:41.476819 4926 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.477152 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="200ms" Nov 25 19:26:41 crc kubenswrapper[4926]: E1125 19:26:41.678338 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="400ms" Nov 25 19:26:42 crc kubenswrapper[4926]: E1125 19:26:42.079054 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="800ms" Nov 25 19:26:42 crc kubenswrapper[4926]: E1125 19:26:42.381672 4926 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/mysql-db-openstack-galera-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/mysql-db-openstack-galera-0\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openstack/openstack-galera-0" volumeName="mysql-db" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.733887 4926 generic.go:334] "Generic (PLEG): container finished" podID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerID="88deed01138ef7fdf9e97905c467d5fc72ef2b2d6252b40b5f62375fee4ef227" exitCode=1 Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.733955 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" 
event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerDied","Data":"88deed01138ef7fdf9e97905c467d5fc72ef2b2d6252b40b5f62375fee4ef227"} Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.734342 4926 scope.go:117] "RemoveContainer" containerID="88deed01138ef7fdf9e97905c467d5fc72ef2b2d6252b40b5f62375fee4ef227" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.735561 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.736186 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.737017 4926 generic.go:334] "Generic (PLEG): container finished" podID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerID="78b7d6650635333098268bbb9a192fcf36d9915ac328aeb59802be4c4076ea54" exitCode=1 Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.737047 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerDied","Data":"78b7d6650635333098268bbb9a192fcf36d9915ac328aeb59802be4c4076ea54"} Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.737473 4926 scope.go:117] "RemoveContainer" containerID="78b7d6650635333098268bbb9a192fcf36d9915ac328aeb59802be4c4076ea54" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.737800 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.738080 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:42 crc kubenswrapper[4926]: I1125 19:26:42.738488 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:42 crc kubenswrapper[4926]: E1125 19:26:42.880119 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="1.6s" Nov 25 
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.556658 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.67:8080/readyz\": dial tcp 10.217.0.67:8080: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.753139 4926 generic.go:334] "Generic (PLEG): container finished" podID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerID="4979fe87f2fd4931e357ee3e36c5360952d0a23e11f2b29399523d4d20218c19" exitCode=1
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.753215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerDied","Data":"4979fe87f2fd4931e357ee3e36c5360952d0a23e11f2b29399523d4d20218c19"}
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.753275 4926 scope.go:117] "RemoveContainer" containerID="88deed01138ef7fdf9e97905c467d5fc72ef2b2d6252b40b5f62375fee4ef227"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.754175 4926 scope.go:117] "RemoveContainer" containerID="4979fe87f2fd4931e357ee3e36c5360952d0a23e11f2b29399523d4d20218c19"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.754392 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: E1125 19:26:43.754931 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-s4wxr_openstack-operators(f9d1a5dc-de6e-45fa-ab5d-1de529f40894)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.755463 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.756106 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.757829 4926 generic.go:334] "Generic (PLEG): container finished" podID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerID="8d0d215e93c8029df7d91d0fe4aad569513eda8016721dd49baf577e1142d463" exitCode=1
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.757921 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerDied","Data":"8d0d215e93c8029df7d91d0fe4aad569513eda8016721dd49baf577e1142d463"}
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.758892 4926 scope.go:117] "RemoveContainer" containerID="8d0d215e93c8029df7d91d0fe4aad569513eda8016721dd49baf577e1142d463"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.759036 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: E1125 19:26:43.759322 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-89dkl_openstack-operators(b70bd0b1-5555-49f4-ae5f-dfeebd005029)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.759942 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.760421 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.761939 4926 generic.go:334] "Generic (PLEG): container finished" podID="2ac11a24-0681-41d4-b943-8bf5b5396a40" containerID="d6632cfdd84fbaaf1d498e5e9ca0d8acba4a3a5af14fa8361648562c9806ecf6" exitCode=1
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.761991 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" event={"ID":"2ac11a24-0681-41d4-b943-8bf5b5396a40","Type":"ContainerDied","Data":"d6632cfdd84fbaaf1d498e5e9ca0d8acba4a3a5af14fa8361648562c9806ecf6"}
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.762866 4926 scope.go:117] "RemoveContainer" containerID="d6632cfdd84fbaaf1d498e5e9ca0d8acba4a3a5af14fa8361648562c9806ecf6"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.762954 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.763280 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.764471 4926 status_manager.go:851] "Failed to get status for pod" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5c55bddd9c-5nmb8\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.764913 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:43 crc kubenswrapper[4926]: I1125 19:26:43.903732 4926 scope.go:117] "RemoveContainer" containerID="78b7d6650635333098268bbb9a192fcf36d9915ac328aeb59802be4c4076ea54"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.328223 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.329727 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.330114 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.330527 4926 status_manager.go:851] "Failed to get status for pod" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5c55bddd9c-5nmb8\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.330862 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.360168 4926 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f"
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.360549 4926 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f"
mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:44 crc kubenswrapper[4926]: E1125 19:26:44.361514 4926 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.362764 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:44 crc kubenswrapper[4926]: E1125 19:26:44.481325 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.212:6443: connect: connection refused" interval="3.2s" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.773157 4926 generic.go:334] "Generic (PLEG): container finished" podID="2ac11a24-0681-41d4-b943-8bf5b5396a40" containerID="0d5242e3c4c545ee810b5f3ca621a357599eee5a9425cdfdc99f462fce8850aa" exitCode=1 Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.773234 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" event={"ID":"2ac11a24-0681-41d4-b943-8bf5b5396a40","Type":"ContainerDied","Data":"0d5242e3c4c545ee810b5f3ca621a357599eee5a9425cdfdc99f462fce8850aa"} Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.773606 4926 scope.go:117] "RemoveContainer" containerID="d6632cfdd84fbaaf1d498e5e9ca0d8acba4a3a5af14fa8361648562c9806ecf6" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.774349 4926 scope.go:117] "RemoveContainer" containerID="0d5242e3c4c545ee810b5f3ca621a357599eee5a9425cdfdc99f462fce8850aa" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.774490 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.774745 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:44 crc kubenswrapper[4926]: E1125 19:26:44.774827 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-5c55bddd9c-5nmb8_metallb-system(2ac11a24-0681-41d4-b943-8bf5b5396a40)\"" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.774994 4926 status_manager.go:851] "Failed to get status for pod" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5c55bddd9c-5nmb8\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.775305 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.779158 4926 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="1762f682279f71f7507ee5f1e4832663e019a5d578ba7bba0dcb9514b32082c7" exitCode=0 Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.779200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"1762f682279f71f7507ee5f1e4832663e019a5d578ba7bba0dcb9514b32082c7"} Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.779226 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"984c634bf726e66861ccdc2609ea7ee70deff5448707e7c6ac0627e981251fc1"} Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.779484 4926 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.779506 4926 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:44 crc kubenswrapper[4926]: E1125 19:26:44.779949 4926 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.780647 4926 status_manager.go:851] "Failed to get status for pod" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-89dkl\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.781872 4926 status_manager.go:851] "Failed to get status for pod" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.782629 4926 status_manager.go:851] "Failed to get status for pod" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-s4wxr\": dial tcp 38.102.83.212:6443: connect: connection refused" 
Nov 25 19:26:44 crc kubenswrapper[4926]: I1125 19:26:44.783253 4926 status_manager.go:851] "Failed to get status for pod" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-5c55bddd9c-5nmb8\": dial tcp 38.102.83.212:6443: connect: connection refused" Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.796350 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3ff3b488b6f1dbbe58f2eb2c70db33666fdabe819e86a0084da8d6a81c535a8f"} Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.796878 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e9bdde527ee1046f7e20865ebf84bc570e3e85b77d49d09bf07052c95bdc6cff"} Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.796894 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fb8757dd4b1b8fc7a75ac856f06c90ec1b732cdf15b173f006187a43659587c4"} Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.799689 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.799738 4926 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa" exitCode=1 Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.799764 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa"} Nov 25 19:26:45 crc kubenswrapper[4926]: I1125 19:26:45.800437 4926 scope.go:117] "RemoveContainer" containerID="c1c1f05ca34e641b3e0af3abb19200847a915690d15dd7736591479da48fd6aa" Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.813304 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"927c817025183c4a73548ae57f488a3be08fb6e85179eea0c97180df9b492576"} Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.813735 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.813486 4926 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.813780 4926 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.814061 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
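The two back-off figures recurring in this log ("back-off 10s" for the operator managers, "back-off 5m0s" for machine-config-daemon) are points on the same curve: restart delay starts small and doubles per consecutive failure until it hits a cap, which matches the kubelet's documented CrashLoopBackOff defaults (initial 10s, factor 2, 5m maximum). The helper below is illustrative of that schedule, not kubelet code:

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay returns a kubelet-style restart delay for the n-th
// consecutive failure: 10s doubling per restart, capped at 5m.
func crashLoopDelay(restarts int) time.Duration {
	delay := 10 * time.Second
	for i := 0; i < restarts; i++ {
		delay *= 2
		if delay >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return delay
}

func main() {
	// Prints 10s, 20s, 40s, ... up to the 5m0s seen for machine-config-daemon.
	for n := 0; n <= 6; n++ {
		fmt.Printf("restart %d -> back-off %s\n", n, crashLoopDelay(n))
	}
}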
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"6dfc5a780380121449f4ae010ea58a05cdb1a4a2a2f3fcf459d5418ca9a4d9dc"} Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.816095 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.816139 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"433ac7e153230ba387f9c264ebe920b5773b5f99e726bd7e3ad34c04712b01f8"} Nov 25 19:26:46 crc kubenswrapper[4926]: I1125 19:26:46.895819 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 19:26:49 crc kubenswrapper[4926]: I1125 19:26:49.330359 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:26:49 crc kubenswrapper[4926]: E1125 19:26:49.332674 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:26:49 crc kubenswrapper[4926]: I1125 19:26:49.364186 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:49 crc kubenswrapper[4926]: I1125 19:26:49.364670 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:49 crc kubenswrapper[4926]: I1125 19:26:49.369910 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.328962 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.331040 4926 scope.go:117] "RemoveContainer" containerID="8d0d215e93c8029df7d91d0fe4aad569513eda8016721dd49baf577e1142d463" Nov 25 19:26:51 crc kubenswrapper[4926]: E1125 19:26:51.331490 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-89dkl_openstack-operators(b70bd0b1-5555-49f4-ae5f-dfeebd005029)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.689895 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.690603 4926 scope.go:117] "RemoveContainer" containerID="4979fe87f2fd4931e357ee3e36c5360952d0a23e11f2b29399523d4d20218c19" Nov 25 19:26:51 crc 
kubenswrapper[4926]: E1125 19:26:51.690828 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-s4wxr_openstack-operators(f9d1a5dc-de6e-45fa-ab5d-1de529f40894)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.826594 4926 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.859752 4926 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.859798 4926 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.872670 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.877435 4926 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="37fcf979-578d-4b6f-8850-ca4e5d78b0cd" Nov 25 19:26:51 crc kubenswrapper[4926]: I1125 19:26:51.947617 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.871246 4926 generic.go:334] "Generic (PLEG): container finished" podID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerID="008a9fccfa7f8d5af7cc3867df37579f65425476b19a4aad6da69932129086c4" exitCode=1 Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.871420 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerDied","Data":"008a9fccfa7f8d5af7cc3867df37579f65425476b19a4aad6da69932129086c4"} Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.872091 4926 scope.go:117] "RemoveContainer" containerID="008a9fccfa7f8d5af7cc3867df37579f65425476b19a4aad6da69932129086c4" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.873171 4926 generic.go:334] "Generic (PLEG): container finished" podID="50b0b29f-bc51-4109-88ce-84d3223fc78e" containerID="0c82d61551d4c6b2303a001e4a667b7bf3dbfb6079a898be3a217aaaaf186484" exitCode=1 Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.873215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" event={"ID":"50b0b29f-bc51-4109-88ce-84d3223fc78e","Type":"ContainerDied","Data":"0c82d61551d4c6b2303a001e4a667b7bf3dbfb6079a898be3a217aaaaf186484"} Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.873536 4926 scope.go:117] "RemoveContainer" containerID="0c82d61551d4c6b2303a001e4a667b7bf3dbfb6079a898be3a217aaaaf186484" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.881528 4926 generic.go:334] "Generic (PLEG): container finished" podID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerID="a703b3ca21c7188fc90a832e0b74a11850db4ca3065ab85f9996a985bc86c604" 
exitCode=1 Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.881624 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerDied","Data":"a703b3ca21c7188fc90a832e0b74a11850db4ca3065ab85f9996a985bc86c604"} Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.882912 4926 scope.go:117] "RemoveContainer" containerID="a703b3ca21c7188fc90a832e0b74a11850db4ca3065ab85f9996a985bc86c604" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.884177 4926 generic.go:334] "Generic (PLEG): container finished" podID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" containerID="4a11c7a32b52da25f55657b390f56bfc989ba7e9902399ab5cd9c90dafd9911f" exitCode=1 Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.884243 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerDied","Data":"4a11c7a32b52da25f55657b390f56bfc989ba7e9902399ab5cd9c90dafd9911f"} Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.884652 4926 scope.go:117] "RemoveContainer" containerID="4a11c7a32b52da25f55657b390f56bfc989ba7e9902399ab5cd9c90dafd9911f" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.891743 4926 generic.go:334] "Generic (PLEG): container finished" podID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerID="57ba6053b6d768094587b78baa67e67f682c7f8d16bf738b7b7152a97a7a56b2" exitCode=1 Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.891838 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerDied","Data":"57ba6053b6d768094587b78baa67e67f682c7f8d16bf738b7b7152a97a7a56b2"} Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.892179 4926 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.892224 4926 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:26:52 crc kubenswrapper[4926]: I1125 19:26:52.892748 4926 scope.go:117] "RemoveContainer" containerID="57ba6053b6d768094587b78baa67e67f682c7f8d16bf738b7b7152a97a7a56b2" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.049738 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.049839 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.525626 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.529217 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.556296 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
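The "Generic (PLEG): container finished" / "SyncLoop (PLEG): event for pod" pairs above are the pod lifecycle event generator relisting the runtime and translating observed state changes into ContainerDied / ContainerStarted events that drive the sync loop's restarts, probes, and cleanup. A toy version of that translation; the types and handler below are illustrative, not kubelet's actual ones:

package main

import "fmt"

type EventType string

const (
	ContainerStarted EventType = "ContainerStarted"
	ContainerDied    EventType = "ContainerDied"
)

// PodLifecycleEvent mirrors the shape of the event={...} payloads in
// the log: a pod ID, an event type, and a container ID in Data.
type PodLifecycleEvent struct {
	PodID string
	Type  EventType
	Data  string
}

// handleEvent mirrors what the log shows: a died container triggers a
// RemoveContainer of the previous instance and a restart decision,
// while a started container triggers re-running its probes.
func handleEvent(ev PodLifecycleEvent) {
	switch ev.Type {
	case ContainerDied:
		fmt.Printf("pod %s: container %s finished; scheduling cleanup/restart\n", ev.PodID, ev.Data)
	case ContainerStarted:
		fmt.Printf("pod %s: container %s started; re-running probes\n", ev.PodID, ev.Data)
	}
}

func main() {
	handleEvent(PodLifecycleEvent{"metallb-operator-controller-manager", ContainerDied, "d6632cfdd84f"})
	handleEvent(PodLifecycleEvent{"kube-apiserver-crc", ContainerStarted, "927c81702518"})
}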
pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.557269 4926 scope.go:117] "RemoveContainer" containerID="0d5242e3c4c545ee810b5f3ca621a357599eee5a9425cdfdc99f462fce8850aa" Nov 25 19:26:53 crc kubenswrapper[4926]: E1125 19:26:53.557542 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-5c55bddd9c-5nmb8_metallb-system(2ac11a24-0681-41d4-b943-8bf5b5396a40)\"" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" podUID="2ac11a24-0681-41d4-b943-8bf5b5396a40" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.938364 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerStarted","Data":"95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.938867 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.940998 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerID="fe7de231f90153efe556ecbd82e9a9aac08ff78998823dc181be1c432f0e3255" exitCode=1 Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.941045 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerDied","Data":"fe7de231f90153efe556ecbd82e9a9aac08ff78998823dc181be1c432f0e3255"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.941708 4926 scope.go:117] "RemoveContainer" containerID="fe7de231f90153efe556ecbd82e9a9aac08ff78998823dc181be1c432f0e3255" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.956593 4926 generic.go:334] "Generic (PLEG): container finished" podID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerID="ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02" exitCode=1 Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.956656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerDied","Data":"ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.956687 4926 scope.go:117] "RemoveContainer" containerID="008a9fccfa7f8d5af7cc3867df37579f65425476b19a4aad6da69932129086c4" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.957242 4926 scope.go:117] "RemoveContainer" containerID="ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02" Nov 25 19:26:53 crc kubenswrapper[4926]: E1125 19:26:53.958510 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-l4vqr_openstack-operators(c67a3051-deee-4c35-b2fd-73f0f96ccbac)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 
19:26:53.968749 4926 generic.go:334] "Generic (PLEG): container finished" podID="74627669-e952-4db6-b082-5e7bd38b03b3" containerID="7c3f5f6908f29670cc972cd774e05d8651974681ae5f49d1de3f953399eff98c" exitCode=1 Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.968880 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerDied","Data":"7c3f5f6908f29670cc972cd774e05d8651974681ae5f49d1de3f953399eff98c"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.969524 4926 scope.go:117] "RemoveContainer" containerID="7c3f5f6908f29670cc972cd774e05d8651974681ae5f49d1de3f953399eff98c" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.980883 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerStarted","Data":"ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.981677 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.986514 4926 generic.go:334] "Generic (PLEG): container finished" podID="abc17280-a647-4d60-8a1a-d01505970238" containerID="9b3870ae68423320d87cdd5c3a13a8bfb425ee7a43670235d9404ce8009e75cf" exitCode=1 Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.986623 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" event={"ID":"abc17280-a647-4d60-8a1a-d01505970238","Type":"ContainerDied","Data":"9b3870ae68423320d87cdd5c3a13a8bfb425ee7a43670235d9404ce8009e75cf"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.987397 4926 scope.go:117] "RemoveContainer" containerID="9b3870ae68423320d87cdd5c3a13a8bfb425ee7a43670235d9404ce8009e75cf" Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.992476 4926 generic.go:334] "Generic (PLEG): container finished" podID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerID="7d32ed9d7c97edc95b6d39ad1cc88a376fd3dcbea77a0e9db77263fe120856f7" exitCode=1 Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.992536 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerDied","Data":"7d32ed9d7c97edc95b6d39ad1cc88a376fd3dcbea77a0e9db77263fe120856f7"} Nov 25 19:26:53 crc kubenswrapper[4926]: I1125 19:26:53.993401 4926 scope.go:117] "RemoveContainer" containerID="7d32ed9d7c97edc95b6d39ad1cc88a376fd3dcbea77a0e9db77263fe120856f7" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.002360 4926 generic.go:334] "Generic (PLEG): container finished" podID="3b14286a-e339-4bd3-835c-67287c341869" containerID="7942fd3051661ba979dd20a373ed0ebb98018b89a78e619a9c6c2c11c6e7b104" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.002467 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerDied","Data":"7942fd3051661ba979dd20a373ed0ebb98018b89a78e619a9c6c2c11c6e7b104"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.003426 4926 scope.go:117] 
"RemoveContainer" containerID="7942fd3051661ba979dd20a373ed0ebb98018b89a78e619a9c6c2c11c6e7b104" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.006926 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerStarted","Data":"60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.007737 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.009766 4926 generic.go:334] "Generic (PLEG): container finished" podID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerID="d33a3388e5be48ae779ab3689c51486c6d865e7005fd45b826b74f00d8c84e5e" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.009805 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerDied","Data":"d33a3388e5be48ae779ab3689c51486c6d865e7005fd45b826b74f00d8c84e5e"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.010118 4926 scope.go:117] "RemoveContainer" containerID="d33a3388e5be48ae779ab3689c51486c6d865e7005fd45b826b74f00d8c84e5e" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.022431 4926 generic.go:334] "Generic (PLEG): container finished" podID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" containerID="afb872d1072f1092b17ff41b103e9985cf27a560cb10b1a33bc68b0903230f95" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.022513 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerDied","Data":"afb872d1072f1092b17ff41b103e9985cf27a560cb10b1a33bc68b0903230f95"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.022963 4926 scope.go:117] "RemoveContainer" containerID="afb872d1072f1092b17ff41b103e9985cf27a560cb10b1a33bc68b0903230f95" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.028695 4926 generic.go:334] "Generic (PLEG): container finished" podID="230b098e-8a89-417e-b5aa-994695273779" containerID="fc4d40c0f9343c6adbc15c21e42091c214d9cc4e696c2eb9b10c24d263495568" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.028768 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerDied","Data":"fc4d40c0f9343c6adbc15c21e42091c214d9cc4e696c2eb9b10c24d263495568"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.029868 4926 scope.go:117] "RemoveContainer" containerID="fc4d40c0f9343c6adbc15c21e42091c214d9cc4e696c2eb9b10c24d263495568" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.034864 4926 generic.go:334] "Generic (PLEG): container finished" podID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerID="36d5a4029a03cefd233b081cf058512bd816348c086fb84f67bd9bc68e0d3f57" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.034938 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" 
event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerDied","Data":"36d5a4029a03cefd233b081cf058512bd816348c086fb84f67bd9bc68e0d3f57"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.035635 4926 scope.go:117] "RemoveContainer" containerID="36d5a4029a03cefd233b081cf058512bd816348c086fb84f67bd9bc68e0d3f57" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.039013 4926 generic.go:334] "Generic (PLEG): container finished" podID="421c1930-795c-4e93-9865-bff40d49ddf5" containerID="c7c18cc5dacf53993e8b01e0ffa88f9592749f627e6c3a7e0d872ab42c51668b" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.039069 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerDied","Data":"c7c18cc5dacf53993e8b01e0ffa88f9592749f627e6c3a7e0d872ab42c51668b"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.039659 4926 scope.go:117] "RemoveContainer" containerID="c7c18cc5dacf53993e8b01e0ffa88f9592749f627e6c3a7e0d872ab42c51668b" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.046737 4926 generic.go:334] "Generic (PLEG): container finished" podID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerID="62ee1f8d5cb505d935396baebef3bd748835ef377da13a0a40c81288f3c7f9ad" exitCode=1 Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.046809 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" event={"ID":"e15f3b97-0859-4f12-87cd-514fab3d75aa","Type":"ContainerDied","Data":"62ee1f8d5cb505d935396baebef3bd748835ef377da13a0a40c81288f3c7f9ad"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.048594 4926 scope.go:117] "RemoveContainer" containerID="62ee1f8d5cb505d935396baebef3bd748835ef377da13a0a40c81288f3c7f9ad" Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.058765 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" event={"ID":"50b0b29f-bc51-4109-88ce-84d3223fc78e","Type":"ContainerStarted","Data":"cc32d7f930781108fb735b89dd5ea89a47e4d492769f67a4e4071fecc54e9758"} Nov 25 19:26:54 crc kubenswrapper[4926]: I1125 19:26:54.059244 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.068341 4926 generic.go:334] "Generic (PLEG): container finished" podID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerID="54dfc64f2d94f9d9ebb7fab0fe449487d912d8c416ffb0208c9e0889db721e1c" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.068441 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerDied","Data":"54dfc64f2d94f9d9ebb7fab0fe449487d912d8c416ffb0208c9e0889db721e1c"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.069576 4926 scope.go:117] "RemoveContainer" containerID="54dfc64f2d94f9d9ebb7fab0fe449487d912d8c416ffb0208c9e0889db721e1c" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.071355 4926 generic.go:334] "Generic (PLEG): container finished" podID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerID="688df1ca7e2c9e28a17c9c8873994c87fd5799073043ed01c30bef3ed85c7407" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 
19:26:55.071461 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerDied","Data":"688df1ca7e2c9e28a17c9c8873994c87fd5799073043ed01c30bef3ed85c7407"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.071934 4926 scope.go:117] "RemoveContainer" containerID="688df1ca7e2c9e28a17c9c8873994c87fd5799073043ed01c30bef3ed85c7407" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.074400 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerStarted","Data":"6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.074591 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.077514 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerStarted","Data":"93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.077681 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.081689 4926 generic.go:334] "Generic (PLEG): container finished" podID="74627669-e952-4db6-b082-5e7bd38b03b3" containerID="6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.081764 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerDied","Data":"6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.081811 4926 scope.go:117] "RemoveContainer" containerID="7c3f5f6908f29670cc972cd774e05d8651974681ae5f49d1de3f953399eff98c" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.082539 4926 scope.go:117] "RemoveContainer" containerID="6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6" Nov 25 19:26:55 crc kubenswrapper[4926]: E1125 19:26:55.082785 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-k8j22_openstack-operators(74627669-e952-4db6-b082-5e7bd38b03b3)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.088544 4926 generic.go:334] "Generic (PLEG): container finished" podID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerID="ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.088625 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" 
event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerDied","Data":"ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.089526 4926 scope.go:117] "RemoveContainer" containerID="ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64" Nov 25 19:26:55 crc kubenswrapper[4926]: E1125 19:26:55.090017 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-mc5kd_openstack-operators(5885db97-a86c-482e-9851-2d8351dc0c3a)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.098679 4926 generic.go:334] "Generic (PLEG): container finished" podID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerID="30cb023ac6bc185e0bdee42576a8e8ae67ff00d6c22cd65550bfcb64bfaf1066" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.099002 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerDied","Data":"30cb023ac6bc185e0bdee42576a8e8ae67ff00d6c22cd65550bfcb64bfaf1066"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.099900 4926 scope.go:117] "RemoveContainer" containerID="30cb023ac6bc185e0bdee42576a8e8ae67ff00d6c22cd65550bfcb64bfaf1066" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.106437 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerStarted","Data":"6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.107727 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.124428 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerStarted","Data":"32052467095ba9616c24c37332fe65479a6eaefedf898c4e9da34e2ff93b4abd"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.124676 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.136133 4926 scope.go:117] "RemoveContainer" containerID="ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02" Nov 25 19:26:55 crc kubenswrapper[4926]: E1125 19:26:55.136462 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-l4vqr_openstack-operators(c67a3051-deee-4c35-b2fd-73f0f96ccbac)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.143846 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" event={"ID":"e15f3b97-0859-4f12-87cd-514fab3d75aa","Type":"ContainerStarted","Data":"59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.145093 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.149955 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerStarted","Data":"f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.151096 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.154664 4926 generic.go:334] "Generic (PLEG): container finished" podID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerID="60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.154746 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerDied","Data":"60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.155292 4926 scope.go:117] "RemoveContainer" containerID="60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931" Nov 25 19:26:55 crc kubenswrapper[4926]: E1125 19:26:55.155620 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-44shk_openstack-operators(5859a238-ed77-4ef7-ac69-295bd1c875c3)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.159163 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerStarted","Data":"6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.160029 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.177179 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerStarted","Data":"735b740ab16198620a6d98149a3ea7645b34b368d0ae2e7aa0ead346f9d9fbdc"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.178234 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.185533 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerStarted","Data":"94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.187002 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.198527 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" event={"ID":"abc17280-a647-4d60-8a1a-d01505970238","Type":"ContainerStarted","Data":"7ad17dbc1670ab6c5beada7490a88c288ef145ee7c5a6b5d604ce12ed26800e5"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.213862 4926 generic.go:334] "Generic (PLEG): container finished" podID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" containerID="95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5" exitCode=1 Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.214023 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerDied","Data":"95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5"} Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.216456 4926 scope.go:117] "RemoveContainer" containerID="95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5" Nov 25 19:26:55 crc kubenswrapper[4926]: E1125 19:26:55.216789 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-h55p4_openstack-operators(1df300a3-1d64-4e46-a0b5-9fe0bf029321)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.228307 4926 scope.go:117] "RemoveContainer" containerID="57ba6053b6d768094587b78baa67e67f682c7f8d16bf738b7b7152a97a7a56b2" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.315634 4926 scope.go:117] "RemoveContainer" containerID="a703b3ca21c7188fc90a832e0b74a11850db4ca3065ab85f9996a985bc86c604" Nov 25 19:26:55 crc kubenswrapper[4926]: I1125 19:26:55.419098 4926 scope.go:117] "RemoveContainer" containerID="4a11c7a32b52da25f55657b390f56bfc989ba7e9902399ab5cd9c90dafd9911f" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.231904 4926 scope.go:117] "RemoveContainer" containerID="95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.232571 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-h55p4_openstack-operators(1df300a3-1d64-4e46-a0b5-9fe0bf029321)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.234012 4926 generic.go:334] "Generic (PLEG): container finished" podID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerID="59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: 
I1125 19:26:56.234088 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" event={"ID":"e15f3b97-0859-4f12-87cd-514fab3d75aa","Type":"ContainerDied","Data":"59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.234121 4926 scope.go:117] "RemoveContainer" containerID="62ee1f8d5cb505d935396baebef3bd748835ef377da13a0a40c81288f3c7f9ad" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.234792 4926 scope.go:117] "RemoveContainer" containerID="59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.235090 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-79c67b7c89-tcqww_openstack-operators(e15f3b97-0859-4f12-87cd-514fab3d75aa)\"" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.242817 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8322c05-5b96-4489-87a7-1677f90df80c" containerID="ca8da6f6a7513d8c23b4eb82db40d9aa8d27584618441719813ab7e73e9b91cf" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.242907 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerDied","Data":"ca8da6f6a7513d8c23b4eb82db40d9aa8d27584618441719813ab7e73e9b91cf"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.243458 4926 scope.go:117] "RemoveContainer" containerID="ca8da6f6a7513d8c23b4eb82db40d9aa8d27584618441719813ab7e73e9b91cf" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.247908 4926 generic.go:334] "Generic (PLEG): container finished" podID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerID="6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.248005 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerDied","Data":"6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.248473 4926 scope.go:117] "RemoveContainer" containerID="6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.248782 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_openstack-operators(c613eed5-f72e-4b4d-8283-5aa4e6241157)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.258839 4926 generic.go:334] "Generic (PLEG): container finished" podID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerID="60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.258916 4926 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerDied","Data":"60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.259656 4926 scope.go:117] "RemoveContainer" containerID="60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.260015 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-rslqc_openstack-operators(b4c6b194-9a8e-4cdb-a0e0-e67dce03328f)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.263167 4926 generic.go:334] "Generic (PLEG): container finished" podID="3b14286a-e339-4bd3-835c-67287c341869" containerID="93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.263253 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerDied","Data":"93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.263986 4926 scope.go:117] "RemoveContainer" containerID="93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.264298 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5q59t_openstack-operators(3b14286a-e339-4bd3-835c-67287c341869)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" podUID="3b14286a-e339-4bd3-835c-67287c341869" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.270155 4926 scope.go:117] "RemoveContainer" containerID="60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.270798 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-44shk_openstack-operators(5859a238-ed77-4ef7-ac69-295bd1c875c3)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.275299 4926 generic.go:334] "Generic (PLEG): container finished" podID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" containerID="6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.275368 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerDied","Data":"6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.276171 4926 scope.go:117] "RemoveContainer" 
containerID="6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.276524 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-gr8fl_openstack-operators(9dfe0bac-7a60-47c9-bef9-e34a75d23521)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podUID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.281099 4926 generic.go:334] "Generic (PLEG): container finished" podID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerID="2b923303522d456ac34c475e103cfea66cd0f4fc2f1e1137ec8064d89ceb8efb" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.281192 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerDied","Data":"2b923303522d456ac34c475e103cfea66cd0f4fc2f1e1137ec8064d89ceb8efb"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.282297 4926 scope.go:117] "RemoveContainer" containerID="2b923303522d456ac34c475e103cfea66cd0f4fc2f1e1137ec8064d89ceb8efb" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.282662 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-bpsp8_openstack-operators(e949ca02-dbd2-4361-8b44-a498d1ec4c13)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.284531 4926 generic.go:334] "Generic (PLEG): container finished" podID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerID="892cf2c781f958010bdc1bb34e3c650396e61eefe06323dd35850c78848bcfd5" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.284612 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerDied","Data":"892cf2c781f958010bdc1bb34e3c650396e61eefe06323dd35850c78848bcfd5"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.284982 4926 scope.go:117] "RemoveContainer" containerID="892cf2c781f958010bdc1bb34e3c650396e61eefe06323dd35850c78848bcfd5" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.289407 4926 generic.go:334] "Generic (PLEG): container finished" podID="abc17280-a647-4d60-8a1a-d01505970238" containerID="7ad17dbc1670ab6c5beada7490a88c288ef145ee7c5a6b5d604ce12ed26800e5" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.289486 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" event={"ID":"abc17280-a647-4d60-8a1a-d01505970238","Type":"ContainerDied","Data":"7ad17dbc1670ab6c5beada7490a88c288ef145ee7c5a6b5d604ce12ed26800e5"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.290123 4926 scope.go:117] "RemoveContainer" containerID="7ad17dbc1670ab6c5beada7490a88c288ef145ee7c5a6b5d604ce12ed26800e5" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.290454 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-v9gvn_openstack-operators(abc17280-a647-4d60-8a1a-d01505970238)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" podUID="abc17280-a647-4d60-8a1a-d01505970238" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.307445 4926 generic.go:334] "Generic (PLEG): container finished" podID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerID="f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.307501 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerDied","Data":"f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.308216 4926 scope.go:117] "RemoveContainer" containerID="f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.308466 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-647d45fc97-x65c4_openstack-operators(596d3616-ddec-489c-be4d-7e340f9e2acb)\"" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.312192 4926 scope.go:117] "RemoveContainer" containerID="ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.312477 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-mc5kd_openstack-operators(5885db97-a86c-482e-9851-2d8351dc0c3a)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.315179 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8395389-762a-497d-972e-0987350a9a00" containerID="c6948f393a9574f110453f8fe0025337a9d98251266c7a3c248197499afd3203" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.315260 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerDied","Data":"c6948f393a9574f110453f8fe0025337a9d98251266c7a3c248197499afd3203"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.316071 4926 scope.go:117] "RemoveContainer" containerID="c6948f393a9574f110453f8fe0025337a9d98251266c7a3c248197499afd3203" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.325386 4926 generic.go:334] "Generic (PLEG): container finished" podID="421c1930-795c-4e93-9865-bff40d49ddf5" containerID="94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.325477 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" 
event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerDied","Data":"94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.326359 4926 scope.go:117] "RemoveContainer" containerID="94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.326926 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-dxhsp_openstack-operators(421c1930-795c-4e93-9865-bff40d49ddf5)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.327152 4926 scope.go:117] "RemoveContainer" containerID="7d32ed9d7c97edc95b6d39ad1cc88a376fd3dcbea77a0e9db77263fe120856f7" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.330359 4926 generic.go:334] "Generic (PLEG): container finished" podID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerID="aa8d292d9a2f1699aa3fe0e2acec83441c679322e771da63fb1ccbd52e907abb" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.336181 4926 generic.go:334] "Generic (PLEG): container finished" podID="230b098e-8a89-417e-b5aa-994695273779" containerID="6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c" exitCode=1 Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.337201 4926 scope.go:117] "RemoveContainer" containerID="6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.337566 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-gcvkp_openstack-operators(230b098e-8a89-417e-b5aa-994695273779)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.365857 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerDied","Data":"aa8d292d9a2f1699aa3fe0e2acec83441c679322e771da63fb1ccbd52e907abb"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.365902 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerDied","Data":"6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c"} Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.366678 4926 scope.go:117] "RemoveContainer" containerID="aa8d292d9a2f1699aa3fe0e2acec83441c679322e771da63fb1ccbd52e907abb" Nov 25 19:26:56 crc kubenswrapper[4926]: E1125 19:26:56.367004 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rl7hc_openstack-operators(306a2bb2-20b9-436d-809a-55499e85e4d6)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 
19:26:56.496498 4926 scope.go:117] "RemoveContainer" containerID="688df1ca7e2c9e28a17c9c8873994c87fd5799073043ed01c30bef3ed85c7407" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.608408 4926 scope.go:117] "RemoveContainer" containerID="7942fd3051661ba979dd20a373ed0ebb98018b89a78e619a9c6c2c11c6e7b104" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.650719 4926 scope.go:117] "RemoveContainer" containerID="afb872d1072f1092b17ff41b103e9985cf27a560cb10b1a33bc68b0903230f95" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.689288 4926 scope.go:117] "RemoveContainer" containerID="54dfc64f2d94f9d9ebb7fab0fe449487d912d8c416ffb0208c9e0889db721e1c" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.717014 4926 scope.go:117] "RemoveContainer" containerID="9b3870ae68423320d87cdd5c3a13a8bfb425ee7a43670235d9404ce8009e75cf" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.754224 4926 scope.go:117] "RemoveContainer" containerID="36d5a4029a03cefd233b081cf058512bd816348c086fb84f67bd9bc68e0d3f57" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.788190 4926 scope.go:117] "RemoveContainer" containerID="c7c18cc5dacf53993e8b01e0ffa88f9592749f627e6c3a7e0d872ab42c51668b" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.824741 4926 scope.go:117] "RemoveContainer" containerID="30cb023ac6bc185e0bdee42576a8e8ae67ff00d6c22cd65550bfcb64bfaf1066" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.874875 4926 scope.go:117] "RemoveContainer" containerID="fc4d40c0f9343c6adbc15c21e42091c214d9cc4e696c2eb9b10c24d263495568" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.891025 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.891142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.892687 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"9b0fa6f26c858dc2369c662a34c73ee3a8d167eaa7efee6bd9a93dbf2aec4638"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Nov 25 19:26:56 crc kubenswrapper[4926]: I1125 19:26:56.892764 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerName="kube-state-metrics" containerID="cri-o://9b0fa6f26c858dc2369c662a34c73ee3a8d167eaa7efee6bd9a93dbf2aec4638" gracePeriod=30 Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.357735 4926 generic.go:334] "Generic (PLEG): container finished" podID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerID="9256f4f797c3335af8831fe0c369394beb4aade0b3688bbbb68f3281c30b3a10" exitCode=1 Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.357822 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerDied","Data":"9256f4f797c3335af8831fe0c369394beb4aade0b3688bbbb68f3281c30b3a10"} Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.357886 4926 scope.go:117] "RemoveContainer" 
containerID="892cf2c781f958010bdc1bb34e3c650396e61eefe06323dd35850c78848bcfd5" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.358941 4926 scope.go:117] "RemoveContainer" containerID="9256f4f797c3335af8831fe0c369394beb4aade0b3688bbbb68f3281c30b3a10" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.359455 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-9f2dg_openstack-operators(d992fc2a-a506-4c10-a8fa-1e3416074e73)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.364771 4926 generic.go:334] "Generic (PLEG): container finished" podID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerID="9b0fa6f26c858dc2369c662a34c73ee3a8d167eaa7efee6bd9a93dbf2aec4638" exitCode=2 Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.364883 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1","Type":"ContainerDied","Data":"9b0fa6f26c858dc2369c662a34c73ee3a8d167eaa7efee6bd9a93dbf2aec4638"} Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.372282 4926 scope.go:117] "RemoveContainer" containerID="f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.372776 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-647d45fc97-x65c4_openstack-operators(596d3616-ddec-489c-be4d-7e340f9e2acb)\"" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.378556 4926 scope.go:117] "RemoveContainer" containerID="6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.379270 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-gr8fl_openstack-operators(9dfe0bac-7a60-47c9-bef9-e34a75d23521)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podUID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.398063 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8395389-762a-497d-972e-0987350a9a00" containerID="567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1" exitCode=1 Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.398130 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerDied","Data":"567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1"} Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.398982 4926 scope.go:117] "RemoveContainer" containerID="567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.399283 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-hhl9b_openstack-operators(c8395389-762a-497d-972e-0987350a9a00)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.403598 4926 scope.go:117] "RemoveContainer" containerID="59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.404011 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-79c67b7c89-tcqww_openstack-operators(e15f3b97-0859-4f12-87cd-514fab3d75aa)\"" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.411559 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8322c05-5b96-4489-87a7-1677f90df80c" containerID="f890fe33e6583a0fa972bc38c82bcc52e7120f1cd489d1fea6cb9a0d529eead2" exitCode=1 Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.411663 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerDied","Data":"f890fe33e6583a0fa972bc38c82bcc52e7120f1cd489d1fea6cb9a0d529eead2"} Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.412317 4926 scope.go:117] "RemoveContainer" containerID="f890fe33e6583a0fa972bc38c82bcc52e7120f1cd489d1fea6cb9a0d529eead2" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.412789 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-8w6rx_openstack-operators(c8322c05-5b96-4489-87a7-1677f90df80c)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.419973 4926 scope.go:117] "RemoveContainer" containerID="6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.420538 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-gcvkp_openstack-operators(230b098e-8a89-417e-b5aa-994695273779)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.423279 4926 scope.go:117] "RemoveContainer" containerID="6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.423764 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_openstack-operators(c613eed5-f72e-4b4d-8283-5aa4e6241157)\"" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.433715 4926 scope.go:117] "RemoveContainer" containerID="94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.434098 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-dxhsp_openstack-operators(421c1930-795c-4e93-9865-bff40d49ddf5)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.438340 4926 scope.go:117] "RemoveContainer" containerID="93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e" Nov 25 19:26:57 crc kubenswrapper[4926]: E1125 19:26:57.438647 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5q59t_openstack-operators(3b14286a-e339-4bd3-835c-67287c341869)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" podUID="3b14286a-e339-4bd3-835c-67287c341869" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.460126 4926 scope.go:117] "RemoveContainer" containerID="c6948f393a9574f110453f8fe0025337a9d98251266c7a3c248197499afd3203" Nov 25 19:26:57 crc kubenswrapper[4926]: I1125 19:26:57.828649 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 19:26:58 crc kubenswrapper[4926]: I1125 19:26:58.204651 4926 scope.go:117] "RemoveContainer" containerID="ca8da6f6a7513d8c23b4eb82db40d9aa8d27584618441719813ab7e73e9b91cf" Nov 25 19:26:58 crc kubenswrapper[4926]: I1125 19:26:58.457760 4926 generic.go:334] "Generic (PLEG): container finished" podID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerID="b02c23f6c3ff03ae46e693d73d314d473a121630e5ea9f0d43083780ce5c1645" exitCode=1 Nov 25 19:26:58 crc kubenswrapper[4926]: I1125 19:26:58.457873 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1","Type":"ContainerDied","Data":"b02c23f6c3ff03ae46e693d73d314d473a121630e5ea9f0d43083780ce5c1645"} Nov 25 19:26:58 crc kubenswrapper[4926]: I1125 19:26:58.458315 4926 scope.go:117] "RemoveContainer" containerID="9b0fa6f26c858dc2369c662a34c73ee3a8d167eaa7efee6bd9a93dbf2aec4638" Nov 25 19:26:58 crc kubenswrapper[4926]: I1125 19:26:58.458446 4926 scope.go:117] "RemoveContainer" containerID="b02c23f6c3ff03ae46e693d73d314d473a121630e5ea9f0d43083780ce5c1645" Nov 25 19:26:58 crc kubenswrapper[4926]: I1125 19:26:58.463675 4926 scope.go:117] "RemoveContainer" containerID="59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf" Nov 25 19:26:58 crc kubenswrapper[4926]: E1125 19:26:58.463982 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-79c67b7c89-tcqww_openstack-operators(e15f3b97-0859-4f12-87cd-514fab3d75aa)\"" 
pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" Nov 25 19:26:59 crc kubenswrapper[4926]: I1125 19:26:59.480644 4926 generic.go:334] "Generic (PLEG): container finished" podID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" containerID="c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135" exitCode=1 Nov 25 19:26:59 crc kubenswrapper[4926]: I1125 19:26:59.480883 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1","Type":"ContainerDied","Data":"c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135"} Nov 25 19:26:59 crc kubenswrapper[4926]: I1125 19:26:59.481001 4926 scope.go:117] "RemoveContainer" containerID="b02c23f6c3ff03ae46e693d73d314d473a121630e5ea9f0d43083780ce5c1645" Nov 25 19:26:59 crc kubenswrapper[4926]: I1125 19:26:59.481584 4926 scope.go:117] "RemoveContainer" containerID="c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135" Nov 25 19:26:59 crc kubenswrapper[4926]: E1125 19:26:59.482010 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1)\"" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" Nov 25 19:27:00 crc kubenswrapper[4926]: I1125 19:27:00.396727 4926 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="37fcf979-578d-4b6f-8850-ca4e5d78b0cd" Nov 25 19:27:00 crc kubenswrapper[4926]: I1125 19:27:00.498964 4926 scope.go:117] "RemoveContainer" containerID="c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135" Nov 25 19:27:00 crc kubenswrapper[4926]: E1125 19:27:00.499396 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1)\"" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.045843 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.046973 4926 scope.go:117] "RemoveContainer" containerID="567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.047258 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.047777 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-hhl9b_openstack-operators(c8395389-762a-497d-972e-0987350a9a00)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.063358 4926 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.064448 4926 scope.go:117] "RemoveContainer" containerID="94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.064896 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-dxhsp_openstack-operators(421c1930-795c-4e93-9865-bff40d49ddf5)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.092693 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.092761 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.093781 4926 scope.go:117] "RemoveContainer" containerID="2b923303522d456ac34c475e103cfea66cd0f4fc2f1e1137ec8064d89ceb8efb" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.094164 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-bpsp8_openstack-operators(e949ca02-dbd2-4361-8b44-a498d1ec4c13)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.126818 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.126894 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.128062 4926 scope.go:117] "RemoveContainer" containerID="f890fe33e6583a0fa972bc38c82bcc52e7120f1cd489d1fea6cb9a0d529eead2" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.129632 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-8w6rx_openstack-operators(c8322c05-5b96-4489-87a7-1677f90df80c)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.171658 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.172512 4926 scope.go:117] "RemoveContainer" containerID="93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.172800 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5q59t_openstack-operators(3b14286a-e339-4bd3-835c-67287c341869)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" podUID="3b14286a-e339-4bd3-835c-67287c341869" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.181149 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.182035 4926 scope.go:117] "RemoveContainer" containerID="95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.182322 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-h55p4_openstack-operators(1df300a3-1d64-4e46-a0b5-9fe0bf029321)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.329226 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.330494 4926 scope.go:117] "RemoveContainer" containerID="8d0d215e93c8029df7d91d0fe4aad569513eda8016721dd49baf577e1142d463" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.452277 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.453018 4926 scope.go:117] "RemoveContainer" containerID="6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.453242 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-gr8fl_openstack-operators(9dfe0bac-7a60-47c9-bef9-e34a75d23521)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" podUID="9dfe0bac-7a60-47c9-bef9-e34a75d23521" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.509526 4926 scope.go:117] "RemoveContainer" containerID="567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.509927 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-hhl9b_openstack-operators(c8395389-762a-497d-972e-0987350a9a00)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.674248 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.675209 4926 scope.go:117] "RemoveContainer" containerID="4979fe87f2fd4931e357ee3e36c5360952d0a23e11f2b29399523d4d20218c19" Nov 25 19:27:01 
crc kubenswrapper[4926]: I1125 19:27:01.712869 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.712911 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.713554 4926 scope.go:117] "RemoveContainer" containerID="9256f4f797c3335af8831fe0c369394beb4aade0b3688bbbb68f3281c30b3a10" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.713784 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-9f2dg_openstack-operators(d992fc2a-a506-4c10-a8fa-1e3416074e73)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.723841 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.724543 4926 scope.go:117] "RemoveContainer" containerID="6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.724779 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-k8j22_openstack-operators(74627669-e952-4db6-b082-5e7bd38b03b3)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.725041 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.782545 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.782961 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.783425 4926 scope.go:117] "RemoveContainer" containerID="ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.783841 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-mc5kd_openstack-operators(5885db97-a86c-482e-9851-2d8351dc0c3a)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.783863 4926 scope.go:117] "RemoveContainer" containerID="60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.784184 4926 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-44shk_openstack-operators(5859a238-ed77-4ef7-ac69-295bd1c875c3)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.797270 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.798123 4926 scope.go:117] "RemoveContainer" containerID="6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.798422 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_openstack-operators(c613eed5-f72e-4b4d-8283-5aa4e6241157)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.826793 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.827640 4926 scope.go:117] "RemoveContainer" containerID="6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.827960 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-gcvkp_openstack-operators(230b098e-8a89-417e-b5aa-994695273779)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.851478 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.851944 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.852076 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.852814 4926 scope.go:117] "RemoveContainer" containerID="60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.853145 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-rslqc_openstack-operators(b4c6b194-9a8e-4cdb-a0e0-e67dce03328f)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.853604 4926 kubelet.go:2542] "SyncLoop 
(probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.853955 4926 scope.go:117] "RemoveContainer" containerID="f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.854182 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-647d45fc97-x65c4_openstack-operators(596d3616-ddec-489c-be4d-7e340f9e2acb)\"" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.879003 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.879053 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.879798 4926 scope.go:117] "RemoveContainer" containerID="aa8d292d9a2f1699aa3fe0e2acec83441c679322e771da63fb1ccbd52e907abb" Nov 25 19:27:01 crc kubenswrapper[4926]: E1125 19:27:01.880105 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rl7hc_openstack-operators(306a2bb2-20b9-436d-809a-55499e85e4d6)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.952338 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 19:27:01 crc kubenswrapper[4926]: I1125 19:27:01.998003 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7d958449d8-hxqgw" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.028185 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.294324 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.397657 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.534550 4926 generic.go:334] "Generic (PLEG): container finished" podID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerID="002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243" exitCode=1 Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.534664 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerDied","Data":"002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243"} Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.535029 4926 scope.go:117] "RemoveContainer" 
containerID="4979fe87f2fd4931e357ee3e36c5360952d0a23e11f2b29399523d4d20218c19" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.536346 4926 scope.go:117] "RemoveContainer" containerID="002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243" Nov 25 19:27:02 crc kubenswrapper[4926]: E1125 19:27:02.537050 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-s4wxr_openstack-operators(f9d1a5dc-de6e-45fa-ab5d-1de529f40894)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.542144 4926 generic.go:334] "Generic (PLEG): container finished" podID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerID="836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490" exitCode=1 Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.542239 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerDied","Data":"836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490"} Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.543358 4926 scope.go:117] "RemoveContainer" containerID="6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.543437 4926 scope.go:117] "RemoveContainer" containerID="60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.543552 4926 scope.go:117] "RemoveContainer" containerID="836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490" Nov 25 19:27:02 crc kubenswrapper[4926]: E1125 19:27:02.543834 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-k8j22_openstack-operators(74627669-e952-4db6-b082-5e7bd38b03b3)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" Nov 25 19:27:02 crc kubenswrapper[4926]: E1125 19:27:02.543858 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-rslqc_openstack-operators(b4c6b194-9a8e-4cdb-a0e0-e67dce03328f)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" Nov 25 19:27:02 crc kubenswrapper[4926]: E1125 19:27:02.544107 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-89dkl_openstack-operators(b70bd0b1-5555-49f4-ae5f-dfeebd005029)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.546571 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 19:27:02 crc 
kubenswrapper[4926]: I1125 19:27:02.609038 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.643498 4926 scope.go:117] "RemoveContainer" containerID="8d0d215e93c8029df7d91d0fe4aad569513eda8016721dd49baf577e1142d463" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.709442 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.709551 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.801872 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 19:27:02 crc kubenswrapper[4926]: I1125 19:27:02.823978 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.042311 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.043194 4926 scope.go:117] "RemoveContainer" containerID="ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02" Nov 25 19:27:03 crc kubenswrapper[4926]: E1125 19:27:03.043755 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-l4vqr_openstack-operators(c67a3051-deee-4c35-b2fd-73f0f96ccbac)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.118956 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-vhkn7" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.247775 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.291888 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.335832 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.336474 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:27:03 crc kubenswrapper[4926]: E1125 19:27:03.336940 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.420135 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console"/"openshift-service-ca.crt" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.420721 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.422767 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wcjpv" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.436112 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.454655 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.511392 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.603245 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.669434 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-sdwnd" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.833249 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.854726 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.929668 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 19:27:03 crc kubenswrapper[4926]: I1125 19:27:03.982312 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.085658 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.178890 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.220237 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.345944 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.368026 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.419815 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.457857 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.532857 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.548049 4926 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.582855 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.618871 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.846460 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.900830 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.925829 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-9xpwm" Nov 25 19:27:04 crc kubenswrapper[4926]: I1125 19:27:04.926721 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.002758 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-69jjt" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.079338 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.084158 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.105547 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.133175 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.307021 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.338803 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.347325 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.493411 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.601679 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.607442 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.751751 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.824938 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 19:27:05 crc 
kubenswrapper[4926]: I1125 19:27:05.840506 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.851497 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fd8k7" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.861212 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.878146 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.922338 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.958122 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-8btdl" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.965701 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 19:27:05 crc kubenswrapper[4926]: I1125 19:27:05.978231 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.008221 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.127717 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.139919 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.220687 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.261299 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.354254 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.468229 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.502861 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.519840 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.532959 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.623074 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-pxc6r" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.716202 4926 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.727156 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.731085 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.829753 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.832253 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.835423 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2sclw" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.867791 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.883263 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.883308 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.884304 4926 scope.go:117] "RemoveContainer" containerID="c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135" Nov 25 19:27:06 crc kubenswrapper[4926]: E1125 19:27:06.884760 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1)\"" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.949585 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 19:27:06 crc kubenswrapper[4926]: I1125 19:27:06.966268 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.064798 4926 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.172036 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.307081 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.323920 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wzmlf" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.330237 4926 scope.go:117] "RemoveContainer" containerID="0d5242e3c4c545ee810b5f3ca621a357599eee5a9425cdfdc99f462fce8850aa" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.340066 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"prometheus-metric-storage" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.350323 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.420961 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.425450 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.467032 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.500519 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.503865 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.513283 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.575822 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.580945 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.589198 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.595001 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.608053 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" event={"ID":"2ac11a24-0681-41d4-b943-8bf5b5396a40","Type":"ContainerStarted","Data":"92a989cc611e26fd987427480fd7082165a049e39c5714e5bc249b8d70b26db6"} Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.608205 4926 scope.go:117] "RemoveContainer" containerID="c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135" Nov 25 19:27:07 crc kubenswrapper[4926]: E1125 19:27:07.608447 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1)\"" pod="openstack/kube-state-metrics-0" podUID="e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.615274 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.632448 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.644337 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.648492 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.648546 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.653955 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.656126 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.710108 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.737441 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.768806 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.815494 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.826711 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.857126 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.880585 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.899938 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.935240 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-cj29j" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.960571 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-98qn6" Nov 25 19:27:07 crc kubenswrapper[4926]: I1125 19:27:07.976886 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.020539 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.043788 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.085169 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.126425 4926 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.132002 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.151207 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.153600 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.193435 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-tflgh" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.210778 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.242458 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.243680 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.302175 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.310279 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.318736 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.330566 4926 scope.go:117] "RemoveContainer" containerID="7ad17dbc1670ab6c5beada7490a88c288ef145ee7c5a6b5d604ce12ed26800e5" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.350539 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.351938 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.364010 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.365978 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.396611 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-2xr6z" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.448412 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.453220 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.498960 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.512148 4926 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.530877 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.539725 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.562619 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.615715 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.650884 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.670679 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-v9t8w" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.673646 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.753006 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.829274 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.832134 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.848764 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.898801 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.902159 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.935483 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.978519 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-gjqgn" Nov 25 19:27:08 crc kubenswrapper[4926]: I1125 19:27:08.996567 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.055076 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.063517 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.068843 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.115680 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.115882 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.126424 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.142946 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.143211 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.159699 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-rr2qd" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.165563 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.189473 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-s44c6" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.236737 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.251507 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.300781 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.302991 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.330031 4926 scope.go:117] "RemoveContainer" containerID="59c91cd3a58deaead5cbd150319e5b4641d4177fecc0afa006306d7cc8006cdf" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.338769 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.359757 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.362619 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-rqwjb" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.364750 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.369124 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.374667 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.380295 4926 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.392309 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.457200 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.481975 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.493218 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.493592 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.495172 4926 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.506730 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.553430 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-pktpc" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.554069 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-xg9bb" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.556912 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.580404 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.609335 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.622250 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.627628 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-v9gvn" event={"ID":"abc17280-a647-4d60-8a1a-d01505970238","Type":"ContainerStarted","Data":"3221ee366dbec2f5ae24d16a579286e5ad32cb112e72a68935cec81752ef47a1"} Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.692646 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.714171 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.767858 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.778946 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.814082 4926 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.815142 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.846638 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.856313 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.858309 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.898706 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-bsltd" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.905119 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-qxv8d" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.908855 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-44z7v" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.915796 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.946888 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.961397 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-wrs5l" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.965065 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.990865 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 19:27:09 crc kubenswrapper[4926]: I1125 19:27:09.995399 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.030119 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.055748 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.081011 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-w98f9" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.114085 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.118689 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.121717 4926 reflector.go:368] Caches populated for 
*v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.124024 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.125321 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.125413 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-8sqwv" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.126259 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.130726 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.135240 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-xl5tr" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.144959 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.184628 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-7mfxv" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.213314 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.234707 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.334077 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.357891 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.361896 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.401648 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.434124 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.437040 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.448651 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-fn7fb" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.449661 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.539722 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-vlb2p" 
Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.540539 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-q6cdz" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.570571 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.597484 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.613304 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.640028 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.684786 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.709033 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.730832 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.775237 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.783360 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.815958 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.836665 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.846242 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.849912 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 19:27:10 crc kubenswrapper[4926]: I1125 19:27:10.950260 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.029609 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.065063 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.071626 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mzhrc" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.081265 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.119666 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.153659 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.181260 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-8vc99" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.185283 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.205165 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.247347 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.269164 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.329009 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.329631 4926 scope.go:117] "RemoveContainer" containerID="836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490" Nov 25 19:27:11 crc kubenswrapper[4926]: E1125 19:27:11.329900 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-89dkl_openstack-operators(b70bd0b1-5555-49f4-ae5f-dfeebd005029)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.389929 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.481698 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.530003 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.546595 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.571540 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.632309 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.638568 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.651121 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" event={"ID":"e15f3b97-0859-4f12-87cd-514fab3d75aa","Type":"ContainerStarted","Data":"0245d89c76fd9836f7c9b83609c1ba6d406550929157c938dd3f8d4a39981dfc"} Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.651523 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.674674 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.675951 4926 scope.go:117] "RemoveContainer" containerID="002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243" Nov 25 19:27:11 crc kubenswrapper[4926]: E1125 19:27:11.676484 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-s4wxr_openstack-operators(f9d1a5dc-de6e-45fa-ab5d-1de529f40894)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.684980 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-xbhks" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.703229 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.758804 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.760914 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.777457 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.787505 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.851629 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.875156 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.894041 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.928522 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-jz7bq" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.932637 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-config-data" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.938112 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 
25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.968583 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.983661 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-5db2z" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.988674 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 19:27:11 crc kubenswrapper[4926]: I1125 19:27:11.989736 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.040452 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.115776 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.144455 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.148361 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.205994 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.208848 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.242451 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.260518 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.270002 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.281898 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2fxt6" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.304234 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.329547 4926 scope.go:117] "RemoveContainer" containerID="9256f4f797c3335af8831fe0c369394beb4aade0b3688bbbb68f3281c30b3a10" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.330253 4926 scope.go:117] "RemoveContainer" containerID="93237a57039805d075306d822cd016e0ca2c67ddf321408846ea844fd69c694e" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.342095 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.380050 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.409831 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.486405 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.488974 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.540079 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.653437 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.679616 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.679636 4926 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.684879 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.788363 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.790597 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-dfq27" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.881868 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 19:27:12 crc kubenswrapper[4926]: I1125 19:27:12.968966 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.009691 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.021550 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.031283 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fhcgp" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.041985 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.042803 4926 scope.go:117] "RemoveContainer" containerID="ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.062732 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.066148 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.067522 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"cert-rabbitmq-notifications-svc" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.075942 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.108696 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.108753 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.123863 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.177355 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.191731 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.201496 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-4rp7p" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.215093 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-hxjds" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.261320 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.269276 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.272719 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.329577 4926 scope.go:117] "RemoveContainer" containerID="f890fe33e6583a0fa972bc38c82bcc52e7120f1cd489d1fea6cb9a0d529eead2" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.329663 4926 scope.go:117] "RemoveContainer" containerID="2b923303522d456ac34c475e103cfea66cd0f4fc2f1e1137ec8064d89ceb8efb" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.353894 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.393976 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-s4nxq" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.422296 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.454695 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.454925 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-c6pk9" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.477515 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.545228 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"cinder-volume-nfs-2-config-data" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.553444 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.577226 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.578506 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.649945 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-vvtzv" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.652047 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.674009 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" event={"ID":"3b14286a-e339-4bd3-835c-67287c341869","Type":"ContainerStarted","Data":"f7100af4a80781fe600a99590fa299e9c87d30096bb5fe4fa7ec54d6a3fc80aa"} Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.676063 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.676120 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerStarted","Data":"cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770"} Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.676445 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.678733 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerStarted","Data":"7b8b99244ccc8dd316b071e2b038d28d235bcd7226b1b5758497e7b989466252"} Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.679032 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.736912 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.758198 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.809027 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.820247 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.837775 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.847253 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.860080 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.899462 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.929938 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 19:27:13 crc kubenswrapper[4926]: I1125 19:27:13.961946 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.023618 4926 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.028008 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.034867 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.170646 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.273554 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.316185 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.329596 4926 scope.go:117] "RemoveContainer" containerID="6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.329866 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.330086 4926 scope.go:117] "RemoveContainer" containerID="94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64" Nov 25 19:27:14 crc kubenswrapper[4926]: E1125 19:27:14.330191 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.330202 4926 scope.go:117] "RemoveContainer" containerID="567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.332299 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.336306 4926 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.374808 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.383900 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-4dgcp" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.470767 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.617402 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.626272 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.646289 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-zg5ps" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.660101 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.666811 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.674247 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.691419 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerStarted","Data":"8e442d849e1e2a7793d95f2aecdf99cf08c6d3bdf8a01c69e31cde4c7a068182"} Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.691929 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.693583 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerStarted","Data":"d6a6c8972ffb22195afabc8abbbe57836951599491f7d56e0f1ccb85fe5c1408"} Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.694519 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.702824 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.721406 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.795099 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.804401 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 19:27:14 crc 
kubenswrapper[4926]: I1125 19:27:14.863767 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.878505 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-r689z" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.914918 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 19:27:14 crc kubenswrapper[4926]: I1125 19:27:14.949284 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-9xmqj" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.056299 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-bn9q5" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.083351 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.088822 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.135097 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.187187 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.225278 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.252960 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.303445 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.328959 4926 scope.go:117] "RemoveContainer" containerID="6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.329251 4926 scope.go:117] "RemoveContainer" containerID="6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.329284 4926 scope.go:117] "RemoveContainer" containerID="f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.329525 4926 scope.go:117] "RemoveContainer" containerID="60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.365832 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.370449 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.453627 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-28tsb" 
Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.453801 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.455836 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.521400 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.531241 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.571860 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.576005 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.579588 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.640349 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.702518 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.704961 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerStarted","Data":"32ce552b151021b6302d6d70852291265aaa06e5ce86abc1d75fbbef7d884d29"} Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.705293 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.708738 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerStarted","Data":"6f4d587a7f0be14a01e484d73dafb5cd608c621478dd0067188f227ebf2c91b9"} Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.708954 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.711951 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerStarted","Data":"4b483b263c9f017e85b230b326ec16e8c90af374a11b1481921a6959aaf93983"} Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.712107 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.746858 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.748341 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-7gv72" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.805024 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.828532 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.928272 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 19:27:15 crc kubenswrapper[4926]: I1125 19:27:15.945989 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.124726 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.168908 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.246773 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.264645 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.329481 4926 scope.go:117] "RemoveContainer" containerID="aa8d292d9a2f1699aa3fe0e2acec83441c679322e771da63fb1ccbd52e907abb" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.330242 4926 scope.go:117] "RemoveContainer" containerID="95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.330473 4926 scope.go:117] "RemoveContainer" containerID="6572a2edee824de02b34a454ca22774b90a9ee94eb95638a3b67ae6c1382a216" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.330526 4926 scope.go:117] "RemoveContainer" containerID="ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.413561 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-8294s" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.419985 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.426677 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.570043 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.685507 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.733242 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" 
event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerStarted","Data":"040b794c193aaeaf7ac85bd6ea6573033270c8eb8ca274032f9644dcdfa95dda"} Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.734462 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.737236 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerStarted","Data":"43a657ed2829dff77f3a0976877d8f91c0b277f5caafb10d001b679b39eaefa0"} Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.737437 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.740295 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerStarted","Data":"aa066fd86e6abde2868c57032c9eacb76bfcb557b69603b3b907d9c06860a2e3"} Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.740457 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.742865 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerStarted","Data":"fa80cb586a41197fa2dcef6cf05fc2343aa6d0d9d04229ca9f13d5b01ce4b614"} Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.743436 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.765686 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-8spnr" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.783735 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.796668 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-zt2sz" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.820432 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.826996 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.883865 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-vt7dx" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.891873 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.908774 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.930253 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.935718 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 19:27:16 crc kubenswrapper[4926]: I1125 19:27:16.990317 4926 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.006683 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.006736 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.007093 4926 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.007119 4926 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="96ec25f3-0d9b-41f8-b8fb-6e25e31dc95f" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.071094 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.097042 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.193587 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.329390 4926 scope.go:117] "RemoveContainer" containerID="60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.333893 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.335066 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.382459 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.403973 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-v2szw" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.404419 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.460758 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.478893 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.656526 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.668029 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.688724 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.696120 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.705446 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.754548 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerStarted","Data":"e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a"} Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.754866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.757148 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" event={"ID":"9dfe0bac-7a60-47c9-bef9-e34a75d23521","Type":"ContainerStarted","Data":"c0a29a9751c4678ad87881025d1fef4bdaa3e386b57060613fc74ba924357832"} Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.757585 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.760495 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerStarted","Data":"5d1b9cc2aa619d9343e4589553dafb3573017f169337089252045fff5aa5b226"} Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.760701 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.763529 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerStarted","Data":"90e480ee20789985c1d438054b61ed9347d16bedf8e8859886ec3ec41a70fe70"} Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.763778 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.766633 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerStarted","Data":"a7057aba1403ea8400ad73a18f95d561a059ed967a35a51a3cf7dec5bcb60d2b"} Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.772461 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.773431 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=26.773410727 podStartE2EDuration="26.773410727s" podCreationTimestamp="2025-11-25 19:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 19:27:17.095975269 +0000 UTC m=+4467.481488884" watchObservedRunningTime="2025-11-25 19:27:17.773410727 +0000 UTC m=+4468.158924342" Nov 25 19:27:17 crc kubenswrapper[4926]: I1125 19:27:17.837605 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.063076 4926 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.064180 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.089347 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.095654 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.129283 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-n866l" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.280715 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.370318 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.388469 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.394574 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-g6hs9" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.396991 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-zx2dc" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.572815 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.581205 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.608695 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.650522 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.661760 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.673488 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openstack"/"openstack-scripts" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.689590 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.782766 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.845051 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-r6pjt" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.868148 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.871912 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 19:27:18 crc kubenswrapper[4926]: I1125 19:27:18.969695 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.021740 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.156435 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.202156 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.301660 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-2jdbr" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.532213 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.533059 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.574006 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-7ss9r" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.643459 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.650309 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.819941 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 19:27:19 crc kubenswrapper[4926]: I1125 19:27:19.831514 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 19:27:20 crc kubenswrapper[4926]: I1125 19:27:20.337699 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 19:27:20 crc kubenswrapper[4926]: I1125 19:27:20.562155 4926 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 25 19:27:20 crc kubenswrapper[4926]: I1125 19:27:20.607590 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 19:27:20 crc kubenswrapper[4926]: I1125 19:27:20.710591 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.049307 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.069794 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.096938 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.137888 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.180113 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5q59t" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.184669 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.265220 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.329827 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.330254 4926 scope.go:117] "RemoveContainer" containerID="836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490" Nov 25 19:27:21 crc kubenswrapper[4926]: E1125 19:27:21.330542 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-89dkl_openstack-operators(b70bd0b1-5555-49f4-ae5f-dfeebd005029)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.455626 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-gr8fl" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.468970 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-2jhmx" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.674065 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.675305 4926 scope.go:117] "RemoveContainer" 
containerID="002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243" Nov 25 19:27:21 crc kubenswrapper[4926]: E1125 19:27:21.675851 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-s4wxr_openstack-operators(f9d1a5dc-de6e-45fa-ab5d-1de529f40894)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.716483 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.726968 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.786216 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.795682 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.814497 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.833354 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.871089 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.878722 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:27:21 crc kubenswrapper[4926]: I1125 19:27:21.883366 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:27:22 crc kubenswrapper[4926]: I1125 19:27:22.329172 4926 scope.go:117] "RemoveContainer" containerID="c53983334e80e320d21acae63250a70f6839d767157b1a1c8411be5f4edcd135" Nov 25 19:27:22 crc kubenswrapper[4926]: I1125 19:27:22.359011 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 19:27:23 crc kubenswrapper[4926]: I1125 19:27:23.050020 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:27:24 crc kubenswrapper[4926]: I1125 19:27:24.847895 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1","Type":"ContainerStarted","Data":"cbafd1cd7a240bfe6060458dabe212f39df4e1f751d1b9e474fe0eadc4ae5c01"} Nov 25 19:27:25 crc kubenswrapper[4926]: I1125 19:27:25.329758 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:27:25 
crc kubenswrapper[4926]: E1125 19:27:25.330357 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:27:25 crc kubenswrapper[4926]: I1125 19:27:25.578266 4926 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 19:27:25 crc kubenswrapper[4926]: I1125 19:27:25.578710 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://3d6b720a9501bb6cfb724cb864d4f20835ad4dfe5be7ee8dde81357134684ff5" gracePeriod=5 Nov 25 19:27:25 crc kubenswrapper[4926]: I1125 19:27:25.870151 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 19:27:30 crc kubenswrapper[4926]: I1125 19:27:30.964045 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 19:27:30 crc kubenswrapper[4926]: I1125 19:27:30.964715 4926 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="3d6b720a9501bb6cfb724cb864d4f20835ad4dfe5be7ee8dde81357134684ff5" exitCode=137 Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.727799 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.727871 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852171 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852294 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852470 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852474 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852526 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852658 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.852595 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.853190 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.853909 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.855054 4926 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.855097 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.855114 4926 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.855140 4926 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.855166 4926 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.863276 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). 
InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.957031 4926 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.981440 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.981522 4926 scope.go:117] "RemoveContainer" containerID="3d6b720a9501bb6cfb724cb864d4f20835ad4dfe5be7ee8dde81357134684ff5" Nov 25 19:27:31 crc kubenswrapper[4926]: I1125 19:27:31.981615 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 19:27:32 crc kubenswrapper[4926]: I1125 19:27:32.348528 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 19:27:33 crc kubenswrapper[4926]: I1125 19:27:33.329599 4926 scope.go:117] "RemoveContainer" containerID="836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490" Nov 25 19:27:35 crc kubenswrapper[4926]: I1125 19:27:35.013361 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerStarted","Data":"8b5dd2182818ac145a99a2045749a6e0e27c8d642274a20036425a32d8d2622a"} Nov 25 19:27:35 crc kubenswrapper[4926]: I1125 19:27:35.014032 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:27:36 crc kubenswrapper[4926]: I1125 19:27:36.329268 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:27:36 crc kubenswrapper[4926]: I1125 19:27:36.330240 4926 scope.go:117] "RemoveContainer" containerID="002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243" Nov 25 19:27:36 crc kubenswrapper[4926]: E1125 19:27:36.330354 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:27:36 crc kubenswrapper[4926]: I1125 19:27:36.910052 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 19:27:37 crc kubenswrapper[4926]: I1125 19:27:37.035056 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerStarted","Data":"97fbb0ff8b6683c6dcc524bd8f0de3303e143a5b7983fd2822dd0b356f9b2223"} Nov 25 19:27:37 crc kubenswrapper[4926]: I1125 19:27:37.035322 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:27:41 crc kubenswrapper[4926]: I1125 19:27:41.332929 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:27:41 crc kubenswrapper[4926]: I1125 19:27:41.678087 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.923667 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tsnq2"] Nov 25 19:27:42 crc kubenswrapper[4926]: E1125 19:27:42.924478 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" containerName="installer" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.924498 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" containerName="installer" Nov 25 19:27:42 crc kubenswrapper[4926]: E1125 19:27:42.924552 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.924562 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.924808 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b1a7cbf-373e-4fe0-9cec-7ff7def8739a" containerName="installer" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.924824 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.926711 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:42 crc kubenswrapper[4926]: I1125 19:27:42.938883 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tsnq2"] Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.091860 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvzfs\" (UniqueName: \"kubernetes.io/projected/164bd684-7cad-4fd5-ae07-32ca3fd631ff-kube-api-access-cvzfs\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.092121 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-catalog-content\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.092405 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-utilities\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.194456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvzfs\" (UniqueName: \"kubernetes.io/projected/164bd684-7cad-4fd5-ae07-32ca3fd631ff-kube-api-access-cvzfs\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.194511 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-catalog-content\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.194588 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-utilities\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.195036 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-utilities\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.195128 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-catalog-content\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.211413 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-cvzfs\" (UniqueName: \"kubernetes.io/projected/164bd684-7cad-4fd5-ae07-32ca3fd631ff-kube-api-access-cvzfs\") pod \"redhat-marketplace-tsnq2\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.317067 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.564188 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5c55bddd9c-5nmb8" Nov 25 19:27:43 crc kubenswrapper[4926]: I1125 19:27:43.890562 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tsnq2"] Nov 25 19:27:44 crc kubenswrapper[4926]: I1125 19:27:44.124591 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerStarted","Data":"8ccebb53f699a1e03d8484b41d05d8acbdc2f1a87314521a520f725811622042"} Nov 25 19:27:48 crc kubenswrapper[4926]: I1125 19:27:48.622562 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:49 crc kubenswrapper[4926]: I1125 19:27:49.975024 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jpdcl"] Nov 25 19:27:49 crc kubenswrapper[4926]: I1125 19:27:49.977880 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:49 crc kubenswrapper[4926]: I1125 19:27:49.992303 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpdcl"] Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.039854 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-utilities\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.039945 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-catalog-content\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.039974 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zq8r\" (UniqueName: \"kubernetes.io/projected/63cf85c5-12a7-4265-ae81-e968e686668b-kube-api-access-2zq8r\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.142576 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-utilities\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.142703 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-catalog-content\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.142741 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zq8r\" (UniqueName: \"kubernetes.io/projected/63cf85c5-12a7-4265-ae81-e968e686668b-kube-api-access-2zq8r\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.143924 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-catalog-content\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.144120 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-utilities\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.162221 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2zq8r\" (UniqueName: \"kubernetes.io/projected/63cf85c5-12a7-4265-ae81-e968e686668b-kube-api-access-2zq8r\") pod \"redhat-marketplace-jpdcl\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.312916 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.342945 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:27:50 crc kubenswrapper[4926]: E1125 19:27:50.343515 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:27:50 crc kubenswrapper[4926]: I1125 19:27:50.823514 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpdcl"] Nov 25 19:27:51 crc kubenswrapper[4926]: I1125 19:27:51.203580 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerStarted","Data":"13272c45fcbec3780642d447737257cbdd2f08e0245e0e66d6441ce70d34cd11"} Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.087577 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.128630 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.170652 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.211665 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.795561 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" 
containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.795569 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.918677 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:52 crc kubenswrapper[4926]: I1125 19:27:52.919127 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:53 crc kubenswrapper[4926]: I1125 19:27:53.000642 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:53 crc kubenswrapper[4926]: I1125 19:27:53.000732 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:53 crc kubenswrapper[4926]: I1125 19:27:53.042589 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:53 crc kubenswrapper[4926]: I1125 19:27:53.042794 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:53 crc kubenswrapper[4926]: I1125 19:27:53.778473 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 19:27:58 crc kubenswrapper[4926]: I1125 19:27:58.662597 4926 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:58 crc kubenswrapper[4926]: I1125 19:27:58.662670 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:58 crc kubenswrapper[4926]: I1125 19:27:58.790609 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Nov 25 19:27:58 crc kubenswrapper[4926]: I1125 19:27:58.791172 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 19:27:59 crc kubenswrapper[4926]: I1125 19:27:59.379566 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:27:59 crc kubenswrapper[4926]: I1125 19:27:59.379939 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:27:59 crc kubenswrapper[4926]: I1125 19:27:59.379618 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:27:59 crc kubenswrapper[4926]: I1125 19:27:59.380180 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:02 crc kubenswrapper[4926]: I1125 19:28:02.213571 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:02 crc kubenswrapper[4926]: I1125 19:28:02.213571 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:02 crc kubenswrapper[4926]: I1125 19:28:02.296588 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:02 crc kubenswrapper[4926]: I1125 19:28:02.329058 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:28:02 crc kubenswrapper[4926]: E1125 19:28:02.329301 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.380569 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.380577 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.380629 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.380945 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.463559 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.463569 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" 
probeResult="failure" output="Get \"http://10.217.0.85:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.463763 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.842559 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.842563 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.926496 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.926555 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:02.967620 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.173529 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.173529 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.214579 4926 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.591771 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.592310 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.592823 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.592903 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593002 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593025 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593066 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593040 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593049 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593100 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593267 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593268 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593332 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.593414 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.786617 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.786686 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.787887 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"40b3aa50db332d427a84aa93a155df94b23951f83d0bc8e8aa18b22c3b3bc76a"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-central-agent failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:03.787951 4926 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent" containerID="cri-o://40b3aa50db332d427a84aa93a155df94b23951f83d0bc8e8aa18b22c3b3bc76a" gracePeriod=30 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:04.903581 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:04.903633 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:07.428106 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:07.428636 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:07.430016 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:07.430058 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:08.622565 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:08.622681 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 19:28:52 crc 
kubenswrapper[4926]: I1125 19:28:08.869601 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.380500 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.380506 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.380543 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.380563 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.664545 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.904025 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/livez?exclude=etcd\": context deadline exceeded" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:09.904108 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/livez?exclude=etcd\": context deadline exceeded" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.130575 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.130578 4926 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.131350 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.132088 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.172664 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.172856 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.215843 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.215957 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.371970 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.756564 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.756584 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.756687 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.798902 4926 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.799009 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.925628 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.967526 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.967636 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.967537 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:12.968611 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.134505 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.134633 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.134503 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.135737 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.135832 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.135845 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.135601 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.138817 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.138988 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.218650 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.219208 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.302086 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.301597 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.329576 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:13.329858 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.799571 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.841593 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:13.899246 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": read tcp 10.217.0.2:45096->10.217.0.90:8081: read: connection reset by peer" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.050782 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.051172 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.263902 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.263931 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.263947 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" 
output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.904175 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:14.904235 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:17.428110 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:17.428634 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:17.428132 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:17.428721 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:18.663512 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:18.663512 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/healthz\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:18.910535 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:18.910559 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.381591 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.381638 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.381647 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.381693 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.381728 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.381755 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.382670 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="operator" containerStatusID={"Type":"cri-o","ID":"da96c067b6d67039a17a9d6a91f7d69b8a90a06213b76db7f91fd2c24e0468dc"} pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" containerMessage="Container operator failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.382709 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" 
containerID="cri-o://da96c067b6d67039a17a9d6a91f7d69b8a90a06213b76db7f91fd2c24e0468dc" gracePeriod=30 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.904450 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/livez?exclude=etcd\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:19.904762 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/livez?exclude=etcd\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:20.424519 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:20.424578 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:20.773668 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:20.773659 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:21.878011 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/healthz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:21.878193 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.210716 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.210795 4926 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.292525 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.376535 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.376544 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.376559 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.376655 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.459549 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.459578 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.459898 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline 
exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.459985 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.841551 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.841558 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.841738 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.925559 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:22.925563 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.050549 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.175564 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.175566 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.259584 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509548 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509594 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509752 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509839 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509893 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509900 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.509934 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.510223 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/healthz\": context deadline 
exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.510266 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.510323 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.510415 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.510666 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.510714 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.568148 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.568213 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.568684 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.84:8081/readyz\": dial tcp 10.217.0.84:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:23.883587 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:24.595540 4926 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:24.595911 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:24.905197 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:24.905243 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:24.905428 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:26.335349 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:26.337460 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:27.429102 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:27.429431 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:27.429472 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 19:28:52 crc 
kubenswrapper[4926]: I1125 19:28:27.430164 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller-manager" containerStatusID={"Type":"cri-o","ID":"0d9f56399528f654f4e3fca7a64031d4b909cad493dd7f216a1e4b66acd6b753"} pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" containerMessage="Container controller-manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:27.430191 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" containerID="cri-o://0d9f56399528f654f4e3fca7a64031d4b909cad493dd7f216a1e4b66acd6b753" gracePeriod=30 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:27.429174 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:27.430257 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:28.622575 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:28.778794 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:28.875547 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:28.875645 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.338543 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.338621 4926 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.600545 4926 patch_prober.go:28] interesting pod/perses-operator-5446b9c989-cq9q8 container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.45:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.600575 4926 patch_prober.go:28] interesting pod/perses-operator-5446b9c989-cq9q8 container/perses-operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.45:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.600608 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" podUID="42a2f9e2-7492-45ef-9049-b617d5c1c36d" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.45:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.600643 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" podUID="42a2f9e2-7492-45ef-9049-b617d5c1c36d" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.45:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.917618 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/livez?exclude=etcd\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.917912 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/livez?exclude=etcd\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.917612 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.917958 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.918108 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.918872 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="openshift-apiserver" 
containerStatusID={"Type":"cri-o","ID":"9435cbb74905b6ef93c6b440d7d9dc2e06bd13078770eb2d09f7981918b491a3"} pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" containerMessage="Container openshift-apiserver failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:29.918920 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" containerID="cri-o://9435cbb74905b6ef93c6b440d7d9dc2e06bd13078770eb2d09f7981918b491a3" gracePeriod=120 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:30.773190 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:30.774275 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:31.878507 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.130541 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.130595 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.172385 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.214589 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.370494 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.758612 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.758601 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.936572 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.936595 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:32.978600 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.020536 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.020844 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.103562 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.103875 4926 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.104547 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.104564 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.84:8081/readyz\": dial tcp 10.217.0.84:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:33.104603 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.84:8081/healthz\": dial tcp 10.217.0.84:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:34.905959 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:34.906342 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:35.510344 4926 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-gmzsn container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.61:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:35.510414 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-66df7c8f76-gmzsn" podUID="a90bb080-0480-4a5c-9fee-93684738e0cf" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.61:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:37.428660 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for 
connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:37.429043 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.663577 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.663924 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.663619 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.664880 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"32052467095ba9616c24c37332fe65479a6eaefedf898c4e9da34e2ff93b4abd"} pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.664942 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" containerID="cri-o://32052467095ba9616c24c37332fe65479a6eaefedf898c4e9da34e2ff93b4abd" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.911530 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:38.911517 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" podUID="e15f3b97-0859-4f12-87cd-514fab3d75aa" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:39.339550 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context 
deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:39.339604 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:39.558518 4926 patch_prober.go:28] interesting pod/perses-operator-5446b9c989-cq9q8 container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.45:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:39.558579 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5446b9c989-cq9q8" podUID="42a2f9e2-7492-45ef-9049-b617d5c1c36d" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.45:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:39.706507 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.94:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:40.335391 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:40.337626 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:40.773470 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:40.773565 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:40.773832 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:40.773923 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:40.774283 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9"} pod="openstack/openstack-galera-0" 
containerMessage="Container galera failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.772598 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" probeResult="failure" output="command timed out" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.878154 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.878157 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/healthz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.878233 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.878761 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.878878 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"a7057aba1403ea8400ad73a18f95d561a059ed967a35a51a3cf7dec5bcb60d2b"} pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:41.878944 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" containerID="cri-o://a7057aba1403ea8400ad73a18f95d561a059ed967a35a51a3cf7dec5bcb60d2b" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.088583 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.171575 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.171650 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.172091 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"6f4d587a7f0be14a01e484d73dafb5cd608c621478dd0067188f227ebf2c91b9"} pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.172143 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" containerID="cri-o://6f4d587a7f0be14a01e484d73dafb5cd608c621478dd0067188f227ebf2c91b9" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.296325 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.296475 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.301003 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"d6a6c8972ffb22195afabc8abbbe57836951599491f7d56e0f1ccb85fe5c1408"} pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.301200 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" containerID="cri-o://d6a6c8972ffb22195afabc8abbbe57836951599491f7d56e0f1ccb85fe5c1408" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.378550 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.378618 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.378720 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.378552 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.378794 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.379103 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"8e442d849e1e2a7793d95f2aecdf99cf08c6d3bdf8a01c69e31cde4c7a068182"} pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.379144 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" containerID="cri-o://8e442d849e1e2a7793d95f2aecdf99cf08c6d3bdf8a01c69e31cde4c7a068182" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.379502 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.379827 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"32ce552b151021b6302d6d70852291265aaa06e5ce86abc1d75fbbef7d884d29"} pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.379888 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" containerID="cri-o://32ce552b151021b6302d6d70852291265aaa06e5ce86abc1d75fbbef7d884d29" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.461547 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.461556 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.461635 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.462398 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"8b5dd2182818ac145a99a2045749a6e0e27c8d642274a20036425a32d8d2622a"} pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.462442 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" containerID="cri-o://8b5dd2182818ac145a99a2045749a6e0e27c8d642274a20036425a32d8d2622a" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.462479 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.840533 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.840610 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.924594 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.924662 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.924555 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.924932 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.925455 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" 
containerStatusID={"Type":"cri-o","ID":"aa066fd86e6abde2868c57032c9eacb76bfcb557b69603b3b907d9c06860a2e3"} pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.925494 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"97fbb0ff8b6683c6dcc524bd8f0de3303e143a5b7983fd2822dd0b356f9b2223"} pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.925538 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" containerID="cri-o://97fbb0ff8b6683c6dcc524bd8f0de3303e143a5b7983fd2822dd0b356f9b2223" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:42.925495 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" containerID="cri-o://aa066fd86e6abde2868c57032c9eacb76bfcb557b69603b3b907d9c06860a2e3" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.177527 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.177539 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.177739 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.178408 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"7b8b99244ccc8dd316b071e2b038d28d235bcd7226b1b5758497e7b989466252"} pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.178470 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" containerID="cri-o://7b8b99244ccc8dd316b071e2b038d28d235bcd7226b1b5758497e7b989466252" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.512506 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" 
containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.512585 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.512984 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513721 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"fa80cb586a41197fa2dcef6cf05fc2343aa6d0d9d04229ca9f13d5b01ce4b614"} pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513764 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" containerID="cri-o://fa80cb586a41197fa2dcef6cf05fc2343aa6d0d9d04229ca9f13d5b01ce4b614" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513742 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513820 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513932 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513962 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.513991 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514017 
4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514033 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514077 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514119 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514148 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514158 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514188 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514212 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514453 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514797 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"040b794c193aaeaf7ac85bd6ea6573033270c8eb8ca274032f9644dcdfa95dda"} pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514831 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" 
containerName="manager" containerID="cri-o://040b794c193aaeaf7ac85bd6ea6573033270c8eb8ca274032f9644dcdfa95dda" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514894 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"43a657ed2829dff77f3a0976877d8f91c0b277f5caafb10d001b679b39eaefa0"} pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.514931 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" containerID="cri-o://43a657ed2829dff77f3a0976877d8f91c0b277f5caafb10d001b679b39eaefa0" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.515041 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"4b483b263c9f017e85b230b326ec16e8c90af374a11b1481921a6959aaf93983"} pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.515086 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" containerID="cri-o://4b483b263c9f017e85b230b326ec16e8c90af374a11b1481921a6959aaf93983" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.515275 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"5d1b9cc2aa619d9343e4589553dafb3573017f169337089252045fff5aa5b226"} pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.515309 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" containerID="cri-o://5d1b9cc2aa619d9343e4589553dafb3573017f169337089252045fff5aa5b226" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.520093 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"735b740ab16198620a6d98149a3ea7645b34b368d0ae2e7aa0ead346f9d9fbdc"} pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.520222 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" containerID="cri-o://735b740ab16198620a6d98149a3ea7645b34b368d0ae2e7aa0ead346f9d9fbdc" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.637590 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" 
podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.637638 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.638056 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.638113 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.639057 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"90e480ee20789985c1d438054b61ed9347d16bedf8e8859886ec3ec41a70fe70"} pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" containerMessage="Container manager failed liveness probe, will be restarted" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.639110 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" containerID="cri-o://90e480ee20789985c1d438054b61ed9347d16bedf8e8859886ec3ec41a70fe70" gracePeriod=10 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.720639 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.720623 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.721314 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.84:8081/readyz\": dial tcp 10.217.0.84:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.721446 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 
19:28:43.721662 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.721670 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.721783 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.722982 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.84:8081/readyz\": dial tcp 10.217.0.84:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:43.967536 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.220511 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.761651 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.761699 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.761724 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.761657 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.802661 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.802710 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.802659 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.843584 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.906911 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": context deadline exceeded" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:44.906982 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz?exclude=etcd&exclude=etcd-readiness\": context deadline exceeded" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:47.428080 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:47.428142 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" 
podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:47.457905 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]log ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:28:52 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:28:52 crc kubenswrapper[4926]: readyz check failed Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:47.457960 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:47.945084 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" podUID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerName="manager" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:48.066331 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-79c67b7c89-tcqww" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:48.297293 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" start-of-body= Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:48.297336 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" 
podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:48.799464 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" containerID="cri-o://fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9" gracePeriod=22 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:48.906426 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" podUID="5fd3c793-dd9b-475c-b527-51c52d21e018" containerName="cert-manager-controller" probeResult="failure" output="Get \"http://10.217.0.63:9403/livez\": dial tcp 10.217.0.63:9403: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:49.257356 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:49.259629 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:49.261580 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:49.261686 4926 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerName="galera" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:49.465928 4926 generic.go:334] "Generic (PLEG): container finished" podID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerID="a7057aba1403ea8400ad73a18f95d561a059ed967a35a51a3cf7dec5bcb60d2b" exitCode=-1 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:49.465962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerDied","Data":"a7057aba1403ea8400ad73a18f95d561a059ed967a35a51a3cf7dec5bcb60d2b"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:49.466039 4926 scope.go:117] "RemoveContainer" containerID="aa8d292d9a2f1699aa3fe0e2acec83441c679322e771da63fb1ccbd52e907abb" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:49.908088 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed 
with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]log ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:28:52 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:28:52 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:28:52 crc kubenswrapper[4926]: readyz check failed Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:49.908134 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:50.481675 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerStarted","Data":"ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.045813 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" podUID="c8395389-762a-497d-972e-0987350a9a00" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.78:8081/readyz\": dial tcp 10.217.0.78:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.064419 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" podUID="421c1930-795c-4e93-9865-bff40d49ddf5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.79:8081/readyz\": dial tcp 10.217.0.79:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.093057 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" podUID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.80:8081/readyz\": dial tcp 10.217.0.80:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.127365 4926 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" podUID="c8322c05-5b96-4489-87a7-1677f90df80c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": dial tcp 10.217.0.81:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.181622 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.83:8081/readyz\": dial tcp 10.217.0.83:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.329638 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" podUID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": dial tcp 10.217.0.85:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.675186 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" podUID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": dial tcp 10.217.0.91:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.712923 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" podUID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8081/readyz\": dial tcp 10.217.0.93:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.723684 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" podUID="74627669-e952-4db6-b082-5e7bd38b03b3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.95:8081/readyz\": dial tcp 10.217.0.95:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.781999 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" podUID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.96:8081/readyz\": dial tcp 10.217.0.96:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.783360 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" podUID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.87:8081/readyz\": dial tcp 10.217.0.87:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.798314 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" podUID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.88:8081/readyz\": dial tcp 10.217.0.88:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.827326 4926 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" podUID="230b098e-8a89-417e-b5aa-994695273779" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": dial tcp 10.217.0.97:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.851501 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" podUID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.98:8081/readyz\": dial tcp 10.217.0.98:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.852097 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" podUID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.89:8081/readyz\": dial tcp 10.217.0.89:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.854069 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" podUID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": dial tcp 10.217.0.99:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:51.880314 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" podUID="306a2bb2-20b9-436d-809a-55499e85e4d6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.149748 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.149808 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.151742 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.151847 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.356477 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.610282 4926 generic.go:334] "Generic (PLEG): container finished" podID="f9d1a5dc-de6e-45fa-ab5d-1de529f40894" 
containerID="97fbb0ff8b6683c6dcc524bd8f0de3303e143a5b7983fd2822dd0b356f9b2223" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.610348 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerDied","Data":"97fbb0ff8b6683c6dcc524bd8f0de3303e143a5b7983fd2822dd0b356f9b2223"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.614648 4926 generic.go:334] "Generic (PLEG): container finished" podID="b70bd0b1-5555-49f4-ae5f-dfeebd005029" containerID="8b5dd2182818ac145a99a2045749a6e0e27c8d642274a20036425a32d8d2622a" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.614715 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerDied","Data":"8b5dd2182818ac145a99a2045749a6e0e27c8d642274a20036425a32d8d2622a"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.617466 4926 generic.go:334] "Generic (PLEG): container finished" podID="421c1930-795c-4e93-9865-bff40d49ddf5" containerID="6f4d587a7f0be14a01e484d73dafb5cd608c621478dd0067188f227ebf2c91b9" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.617521 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerDied","Data":"6f4d587a7f0be14a01e484d73dafb5cd608c621478dd0067188f227ebf2c91b9"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.618728 4926 generic.go:334] "Generic (PLEG): container finished" podID="13668c10-93bb-4198-a221-bee2b2ef685b" containerID="0d9f56399528f654f4e3fca7a64031d4b909cad493dd7f216a1e4b66acd6b753" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.618774 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" event={"ID":"13668c10-93bb-4198-a221-bee2b2ef685b","Type":"ContainerDied","Data":"0d9f56399528f654f4e3fca7a64031d4b909cad493dd7f216a1e4b66acd6b753"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.620279 4926 generic.go:334] "Generic (PLEG): container finished" podID="230b098e-8a89-417e-b5aa-994695273779" containerID="4b483b263c9f017e85b230b326ec16e8c90af374a11b1481921a6959aaf93983" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.620328 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerDied","Data":"4b483b263c9f017e85b230b326ec16e8c90af374a11b1481921a6959aaf93983"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.626112 4926 generic.go:334] "Generic (PLEG): container finished" podID="596d3616-ddec-489c-be4d-7e340f9e2acb" containerID="fa80cb586a41197fa2dcef6cf05fc2343aa6d0d9d04229ca9f13d5b01ce4b614" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.626165 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerDied","Data":"fa80cb586a41197fa2dcef6cf05fc2343aa6d0d9d04229ca9f13d5b01ce4b614"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.628582 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="48648219-c573-4084-a23b-17ef23df2666" containerID="da96c067b6d67039a17a9d6a91f7d69b8a90a06213b76db7f91fd2c24e0468dc" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.628633 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" event={"ID":"48648219-c573-4084-a23b-17ef23df2666","Type":"ContainerDied","Data":"da96c067b6d67039a17a9d6a91f7d69b8a90a06213b76db7f91fd2c24e0468dc"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.630906 4926 generic.go:334] "Generic (PLEG): container finished" podID="c613eed5-f72e-4b4d-8283-5aa4e6241157" containerID="43a657ed2829dff77f3a0976877d8f91c0b277f5caafb10d001b679b39eaefa0" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.630954 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerDied","Data":"43a657ed2829dff77f3a0976877d8f91c0b277f5caafb10d001b679b39eaefa0"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.645620 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ac21b6b-e21a-43db-acf1-cce61bf188ef" containerID="735b740ab16198620a6d98149a3ea7645b34b368d0ae2e7aa0ead346f9d9fbdc" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.645714 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerDied","Data":"735b740ab16198620a6d98149a3ea7645b34b368d0ae2e7aa0ead346f9d9fbdc"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.655005 4926 generic.go:334] "Generic (PLEG): container finished" podID="5885db97-a86c-482e-9851-2d8351dc0c3a" containerID="5d1b9cc2aa619d9343e4589553dafb3573017f169337089252045fff5aa5b226" exitCode=1 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.655073 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerDied","Data":"5d1b9cc2aa619d9343e4589553dafb3573017f169337089252045fff5aa5b226"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.659553 4926 generic.go:334] "Generic (PLEG): container finished" podID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" containerID="cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770" exitCode=1 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.659617 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerDied","Data":"cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.660343 4926 scope.go:117] "RemoveContainer" containerID="cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770" Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:52.660584 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-l4vqr_openstack-operators(c67a3051-deee-4c35-b2fd-73f0f96ccbac)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" Nov 25 19:28:52 crc 
kubenswrapper[4926]: I1125 19:28:52.671550 4926 generic.go:334] "Generic (PLEG): container finished" podID="4e869634-c2f9-4248-8ad7-dd9af0315f2b" containerID="32052467095ba9616c24c37332fe65479a6eaefedf898c4e9da34e2ff93b4abd" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.671638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerDied","Data":"32052467095ba9616c24c37332fe65479a6eaefedf898c4e9da34e2ff93b4abd"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.675274 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8395389-762a-497d-972e-0987350a9a00" containerID="32ce552b151021b6302d6d70852291265aaa06e5ce86abc1d75fbbef7d884d29" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.675344 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerDied","Data":"32ce552b151021b6302d6d70852291265aaa06e5ce86abc1d75fbbef7d884d29"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.685114 4926 generic.go:334] "Generic (PLEG): container finished" podID="d992fc2a-a506-4c10-a8fa-1e3416074e73" containerID="7b8b99244ccc8dd316b071e2b038d28d235bcd7226b1b5758497e7b989466252" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.685170 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerDied","Data":"7b8b99244ccc8dd316b071e2b038d28d235bcd7226b1b5758497e7b989466252"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.700401 4926 generic.go:334] "Generic (PLEG): container finished" podID="c8322c05-5b96-4489-87a7-1677f90df80c" containerID="8e442d849e1e2a7793d95f2aecdf99cf08c6d3bdf8a01c69e31cde4c7a068182" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.700460 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerDied","Data":"8e442d849e1e2a7793d95f2aecdf99cf08c6d3bdf8a01c69e31cde4c7a068182"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.729726 4926 generic.go:334] "Generic (PLEG): container finished" podID="63cf85c5-12a7-4265-ae81-e968e686668b" containerID="d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.729807 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerDied","Data":"d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.783260 4926 generic.go:334] "Generic (PLEG): container finished" podID="5859a238-ed77-4ef7-ac69-295bd1c875c3" containerID="040b794c193aaeaf7ac85bd6ea6573033270c8eb8ca274032f9644dcdfa95dda" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.783361 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" 
event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerDied","Data":"040b794c193aaeaf7ac85bd6ea6573033270c8eb8ca274032f9644dcdfa95dda"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.810762 4926 generic.go:334] "Generic (PLEG): container finished" podID="74627669-e952-4db6-b082-5e7bd38b03b3" containerID="aa066fd86e6abde2868c57032c9eacb76bfcb557b69603b3b907d9c06860a2e3" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.810850 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerDied","Data":"aa066fd86e6abde2868c57032c9eacb76bfcb557b69603b3b907d9c06860a2e3"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.820596 4926 scope.go:117] "RemoveContainer" containerID="002926d95da20628f9254acfbe76f850920ed7cd34f8c17c15467fb490ffb243" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.877016 4926 generic.go:334] "Generic (PLEG): container finished" podID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" containerID="e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a" exitCode=1 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.877286 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerDied","Data":"e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.880924 4926 scope.go:117] "RemoveContainer" containerID="e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a" Nov 25 19:28:52 crc kubenswrapper[4926]: E1125 19:28:52.881360 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-h55p4_openstack-operators(1df300a3-1d64-4e46-a0b5-9fe0bf029321)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.889327 4926 generic.go:334] "Generic (PLEG): container finished" podID="e949ca02-dbd2-4361-8b44-a498d1ec4c13" containerID="d6a6c8972ffb22195afabc8abbbe57836951599491f7d56e0f1ccb85fe5c1408" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.889395 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerDied","Data":"d6a6c8972ffb22195afabc8abbbe57836951599491f7d56e0f1ccb85fe5c1408"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.896571 4926 generic.go:334] "Generic (PLEG): container finished" podID="b4c6b194-9a8e-4cdb-a0e0-e67dce03328f" containerID="90e480ee20789985c1d438054b61ed9347d16bedf8e8859886ec3ec41a70fe70" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.896658 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerDied","Data":"90e480ee20789985c1d438054b61ed9347d16bedf8e8859886ec3ec41a70fe70"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.898076 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerID="ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0" exitCode=0 Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.898104 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerDied","Data":"ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0"} Nov 25 19:28:52 crc kubenswrapper[4926]: I1125 19:28:52.966966 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.042095 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.042584 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.043318 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.253965 4926 scope.go:117] "RemoveContainer" containerID="836a14fd28bef75f97d0f71df6de974566af39efac23e0037f06b98e162f7490" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.330061 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent" probeResult="failure" output=< Nov 25 19:28:53 crc kubenswrapper[4926]: Unkown error: Expecting value: line 1 column 1 (char 0) Nov 25 19:28:53 crc kubenswrapper[4926]: > Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.330137 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.908969 4926 generic.go:334] "Generic (PLEG): container finished" podID="5fd3c793-dd9b-475c-b527-51c52d21e018" containerID="1498ffdd9479e34b9c15388cba295e1484ab01daa8106521ded067f18eef006a" exitCode=1 Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.909065 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-x8r5t" event={"ID":"5fd3c793-dd9b-475c-b527-51c52d21e018","Type":"ContainerDied","Data":"1498ffdd9479e34b9c15388cba295e1484ab01daa8106521ded067f18eef006a"} Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.909961 4926 scope.go:117] "RemoveContainer" containerID="1498ffdd9479e34b9c15388cba295e1484ab01daa8106521ded067f18eef006a" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.914945 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" event={"ID":"4e869634-c2f9-4248-8ad7-dd9af0315f2b","Type":"ContainerStarted","Data":"c1ea7ad24465f24e6b472382f308d819cc01e0ff7578e6e0f8bbf263e371b2a7"} Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.915063 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.917320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" event={"ID":"306a2bb2-20b9-436d-809a-55499e85e4d6","Type":"ContainerStarted","Data":"1f4cf4478c356b20bead97a2f48fd3f7efa4ed2276a8dae687b87115ad1d640f"} Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.917688 4926 scope.go:117] "RemoveContainer" containerID="cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770" Nov 25 19:28:53 crc kubenswrapper[4926]: E1125 19:28:53.917959 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-l4vqr_openstack-operators(c67a3051-deee-4c35-b2fd-73f0f96ccbac)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.919605 4926 status_manager.go:317] "Container readiness changed for unknown container" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" containerID="cri-o://a7057aba1403ea8400ad73a18f95d561a059ed967a35a51a3cf7dec5bcb60d2b" Nov 25 19:28:53 crc kubenswrapper[4926]: I1125 19:28:53.919647 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.091470 4926 scope.go:117] "RemoveContainer" containerID="94935ca3a2deec0f8acd6949955259ea0135b4277c3361f6eb4f8ffde26bac64" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.785285 4926 scope.go:117] "RemoveContainer" containerID="6782f1067eb733a88442debab23adac3b9fd006c3cf8ff78de10cc8e60d3125c" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.894589 4926 scope.go:117] "RemoveContainer" containerID="f03512d8b273f61a54a61e879b5b0b73d71041b65010d2609d63ee56cf4cd2ac" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.917607 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]log ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:28:54 crc kubenswrapper[4926]: 
[+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:28:54 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:28:54 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:28:54 crc kubenswrapper[4926]: readyz check failed Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.917717 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.956231 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" event={"ID":"230b098e-8a89-417e-b5aa-994695273779","Type":"ContainerStarted","Data":"793b5fe2771db2ac8274e0157db0aa855dd731ff9cb0c761eb3bdb7bb51a1ea5"} Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.956622 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.959759 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" event={"ID":"421c1930-795c-4e93-9865-bff40d49ddf5","Type":"ContainerStarted","Data":"b899e5737d9975f70f8c8f9a550d027277bf0ccd1738e1e0281c32bf3e9fa7de"} Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.960366 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:28:54 crc kubenswrapper[4926]: I1125 19:28:54.994405 4926 scope.go:117] "RemoveContainer" containerID="6678a6ae3c2b9cee1611d0050520d9d6ec344073bd4c3afbe931da99f9f01331" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.004523 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" event={"ID":"d992fc2a-a506-4c10-a8fa-1e3416074e73","Type":"ContainerStarted","Data":"4dffb161f291f2d99df8977f513b7e3f6a8ee989a8b1655fddd30c46bc8af551"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.004729 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.029824 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" event={"ID":"596d3616-ddec-489c-be4d-7e340f9e2acb","Type":"ContainerStarted","Data":"4880672bf20788f39d3d5ecd928ca74681360b3434acaa34c83b4e6e0738dd61"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.029983 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.049529 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" 
event={"ID":"c613eed5-f72e-4b4d-8283-5aa4e6241157","Type":"ContainerStarted","Data":"e5307d51c95c296ecf6661a379f38a6f283717f127130e09d256fd602daed0b6"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.050614 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.059439 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" event={"ID":"e949ca02-dbd2-4361-8b44-a498d1ec4c13","Type":"ContainerStarted","Data":"1636f02d10873aa629cc1c9b657229cd3e62670ee10eea129a4bfcb6b34728f4"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.060240 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.073961 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" event={"ID":"c8395389-762a-497d-972e-0987350a9a00","Type":"ContainerStarted","Data":"7978d757287fce4a018034c2589b77e9fffe859583994fbdc68b6fb68d6ace47"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.075569 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.087729 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" event={"ID":"c8322c05-5b96-4489-87a7-1677f90df80c","Type":"ContainerStarted","Data":"b62aff91acde090e119354daff05b6705d0756363a3e3d972300118e59dc270b"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.088320 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.090440 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" event={"ID":"f9d1a5dc-de6e-45fa-ab5d-1de529f40894","Type":"ContainerStarted","Data":"2b237f74b8341a3e6b85bdad8275b0971fa7232c29fb25721b9b15c06a00227c"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.090916 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.111215 4926 generic.go:334] "Generic (PLEG): container finished" podID="b04996f9-1035-4982-bd9b-f96ee30cd663" containerID="fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9" exitCode=0 Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.111276 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b04996f9-1035-4982-bd9b-f96ee30cd663","Type":"ContainerDied","Data":"fcfd370461d46adb450c37f92e17d7f89711da1dc54a0f17032af5aa8e0f85a9"} Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.129335 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" event={"ID":"b4c6b194-9a8e-4cdb-a0e0-e67dce03328f","Type":"ContainerStarted","Data":"7380e2a66462551bfff2f8a86d950c1c728f30471e2edc0d38f221e3f4204ec9"} Nov 25 19:28:55 crc kubenswrapper[4926]: 
I1125 19:28:55.130199 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.144074 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" event={"ID":"74627669-e952-4db6-b082-5e7bd38b03b3","Type":"ContainerStarted","Data":"67f17c05b26aeb85254d6e0da46aa52f7ce5cc99f0472a793feb2b838eef4c4d"}
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.144978 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.158955 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" event={"ID":"7ac21b6b-e21a-43db-acf1-cce61bf188ef","Type":"ContainerStarted","Data":"15bdd359de49620da3a46bb8a7a7569aa7e3b42c8a20c8987cfeb237a4099e85"}
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.159816 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.165161 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" event={"ID":"5885db97-a86c-482e-9851-2d8351dc0c3a","Type":"ContainerStarted","Data":"16bcd745651af6bd337e0a6e11f7ef36b5c12208bd691fd72750644144d1a441"}
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.165205 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.165216 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.329955 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:28:55 crc kubenswrapper[4926]: E1125 19:28:55.330569 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.353017 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.353272 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.354206 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"0a7e3c5015ab61589ea8cdadcbaa25bb631eee2e4e6925edac60aad9d8bc695a"} pod="openstack/cinder-scheduler-0" containerMessage="Container cinder-scheduler failed liveness probe, will be restarted"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.354345 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" containerID="cri-o://0a7e3c5015ab61589ea8cdadcbaa25bb631eee2e4e6925edac60aad9d8bc695a" gracePeriod=30
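The gracePeriod=30 kill above is the usual two-step termination: SIGTERM first, then SIGKILL if the container is still running when the grace period lapses. A self-contained Go sketch of that sequence; the sleep command, os/exec handle, and hard-coded 30s are illustrative stand-ins, since kubelet drives the real container through the CRI runtime (cri-o here):

	package main

	import (
		"os/exec"
		"syscall"
		"time"
	)

	func main() {
		// Stand-in for the container's main process.
		cmd := exec.Command("sleep", "300")
		if err := cmd.Start(); err != nil {
			panic(err)
		}
		done := make(chan error, 1)
		go func() { done <- cmd.Wait() }()
		// Ask politely first, as in "Killing container with a grace period".
		_ = cmd.Process.Signal(syscall.SIGTERM)
		select {
		case <-done:
			// The process exited within the grace period.
		case <-time.After(30 * time.Second): // gracePeriod=30, as logged above
			_ = cmd.Process.Kill() // escalate to SIGKILL
			<-done
		}
	}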
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.640886 4926 scope.go:117] "RemoveContainer" containerID="fe7de231f90153efe556ecbd82e9a9aac08ff78998823dc181be1c432f0e3255"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.757576 4926 scope.go:117] "RemoveContainer" containerID="ffa2c71c951124c8264c6fff559319e155d555bea8b2d0f423d562716755ca64"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.827950 4926 scope.go:117] "RemoveContainer" containerID="ac7fe094232b30f96f7d3cb92f2bd9e120f268fba1447365cbdad1cda1c2cf02"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.912804 4926 scope.go:117] "RemoveContainer" containerID="d33a3388e5be48ae779ab3689c51486c6d865e7005fd45b826b74f00d8c84e5e"
Nov 25 19:28:55 crc kubenswrapper[4926]: I1125 19:28:55.973486 4926 scope.go:117] "RemoveContainer" containerID="567036ec5d29a9c21b85ae7c2dfe2b5a83480567adc97226cd202ce7341b12d1"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.037838 4926 scope.go:117] "RemoveContainer" containerID="9256f4f797c3335af8831fe0c369394beb4aade0b3688bbbb68f3281c30b3a10"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.064005 4926 scope.go:117] "RemoveContainer" containerID="f890fe33e6583a0fa972bc38c82bcc52e7120f1cd489d1fea6cb9a0d529eead2"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.086747 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.086835 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-backup-0"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.087645 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-backup" containerStatusID={"Type":"cri-o","ID":"cd8e934b1c32c1d795367e8500d9af927d7b7f461a9c65499d156a46364ddfda"} pod="openstack/cinder-backup-0" containerMessage="Container cinder-backup failed liveness probe, will be restarted"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.087696 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" containerID="cri-o://cd8e934b1c32c1d795367e8500d9af927d7b7f461a9c65499d156a46364ddfda" gracePeriod=30
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.097182 4926 scope.go:117] "RemoveContainer" containerID="60d4857bdec22a58b5cc3e8cacf4bee063fe15f4afdbeea7dc1306e5b8e90931"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.129815 4926 scope.go:117] "RemoveContainer" containerID="6b8522a4a4cbe9d8cf7a9dd27e22fe142c9911073fbec4a2fe1eb32cef2a7fe6"
Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.148790 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:28:56 crc
kubenswrapper[4926]: I1125 19:28:56.148856 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.149689 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-volume" containerStatusID={"Type":"cri-o","ID":"6222abf7d6eadc2029c22566b1527522e45e4346b4cc1b3a71e3ad27c3937f82"} pod="openstack/cinder-volume-nfs-2-0" containerMessage="Container cinder-volume failed liveness probe, will be restarted" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.149740 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" containerID="cri-o://6222abf7d6eadc2029c22566b1527522e45e4346b4cc1b3a71e3ad27c3937f82" gracePeriod=30 Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.157542 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.157613 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-volume-nfs-0" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.158430 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-volume" containerStatusID={"Type":"cri-o","ID":"4e9abf6ec2d5c003fb5d4992bafa57c4f0331ac2421329cb1146be37b2c5ffdc"} pod="openstack/cinder-volume-nfs-0" containerMessage="Container cinder-volume failed liveness probe, will be restarted" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.158482 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" containerID="cri-o://4e9abf6ec2d5c003fb5d4992bafa57c4f0331ac2421329cb1146be37b2c5ffdc" gracePeriod=30 Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.180297 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" event={"ID":"48648219-c573-4084-a23b-17ef23df2666","Type":"ContainerStarted","Data":"cf8b0533d07842079814e11c45cc8a219dfc010d15c6647e8e699924ed18cf17"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.182107 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.182192 4926 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-45q9w container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" start-of-body= Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.182224 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w" podUID="48648219-c573-4084-a23b-17ef23df2666" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": dial tcp 10.217.0.44:8081: connect: connection refused" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.200538 4926 scope.go:117] "RemoveContainer" containerID="95276af96797f9b4a4c55845508dec77a098e776eff0830275ab53932e78c2f5" Nov 
25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.229360 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerStarted","Data":"d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.238506 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" event={"ID":"b70bd0b1-5555-49f4-ae5f-dfeebd005029","Type":"ContainerStarted","Data":"2a8a769764f587c41ba567aa6f99fb11057431d5b2944a5e9aa40fac5c04740e"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.239118 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.269809 4926 scope.go:117] "RemoveContainer" containerID="2b923303522d456ac34c475e103cfea66cd0f4fc2f1e1137ec8064d89ceb8efb" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.313526 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" event={"ID":"13668c10-93bb-4198-a221-bee2b2ef685b","Type":"ContainerStarted","Data":"a228ab5cd23789315ee0fa688b45f1c05d3fd26122db9303663e59de73fcc447"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.313753 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.313952 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": dial tcp 10.217.0.53:8443: connect: connection refused" start-of-body= Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.313995 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": dial tcp 10.217.0.53:8443: connect: connection refused" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.319121 4926 scope.go:117] "RemoveContainer" containerID="60bd041d44162b4f0845f7ace8cd6a6df4d0e8da6b46b381095802d1b47b68cb" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.353566 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" event={"ID":"5859a238-ed77-4ef7-ac69-295bd1c875c3","Type":"ContainerStarted","Data":"a6317bacaf8e4d664546cc0c4dcc2233d4a6550e443fe66eebd43956bb63f63a"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.353603 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.400474 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b04996f9-1035-4982-bd9b-f96ee30cd663","Type":"ContainerStarted","Data":"1a296c6fd579d9c14b7dc9fcd433b3d3b4543bae8b3246f1602ddd532b5d03b7"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.403465 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-5b446d88c5-x8r5t" event={"ID":"5fd3c793-dd9b-475c-b527-51c52d21e018","Type":"ContainerStarted","Data":"57bbffff49a014a2d943809eab8e1887136723d12e4aecbe6c88d933ddd8d093"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.406675 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerStarted","Data":"b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc"} Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.428134 4926 patch_prober.go:28] interesting pod/controller-manager-5f688c6b94-rxxnn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/healthz\": dial tcp 10.217.0.53:8443: connect: connection refused" start-of-body= Nov 25 19:28:56 crc kubenswrapper[4926]: I1125 19:28:56.428180 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" podUID="13668c10-93bb-4198-a221-bee2b2ef685b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.53:8443/healthz\": dial tcp 10.217.0.53:8443: connect: connection refused" Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.415881 4926 generic.go:334] "Generic (PLEG): container finished" podID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerID="d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58" exitCode=0 Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.415917 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerDied","Data":"d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58"} Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.416331 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerStarted","Data":"1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae"} Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.418225 4926 generic.go:334] "Generic (PLEG): container finished" podID="63cf85c5-12a7-4265-ae81-e968e686668b" containerID="b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc" exitCode=0 Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.418277 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerDied","Data":"b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc"} Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.418292 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerStarted","Data":"ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266"} Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.431959 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f688c6b94-rxxnn" Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.435318 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rl7hc" Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 
19:28:57.442548 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tsnq2" podStartSLOduration=71.855347265 podStartE2EDuration="1m15.442533885s" podCreationTimestamp="2025-11-25 19:27:42 +0000 UTC" firstStartedPulling="2025-11-25 19:28:53.072403041 +0000 UTC m=+4563.457916646" lastFinishedPulling="2025-11-25 19:28:56.659589661 +0000 UTC m=+4567.045103266" observedRunningTime="2025-11-25 19:28:57.441763873 +0000 UTC m=+4567.827277468" watchObservedRunningTime="2025-11-25 19:28:57.442533885 +0000 UTC m=+4567.828047490"
Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.471021 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-45q9w"
Nov 25 19:28:57 crc kubenswrapper[4926]: I1125 19:28:57.527518 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jpdcl" podStartSLOduration=64.230912401 podStartE2EDuration="1m8.527500971s" podCreationTimestamp="2025-11-25 19:27:49 +0000 UTC" firstStartedPulling="2025-11-25 19:28:52.734595331 +0000 UTC m=+4563.120108936" lastFinishedPulling="2025-11-25 19:28:57.031183901 +0000 UTC m=+4567.416697506" observedRunningTime="2025-11-25 19:28:57.497127963 +0000 UTC m=+4567.882641568" watchObservedRunningTime="2025-11-25 19:28:57.527500971 +0000 UTC m=+4567.913014566"
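A worked note on the two pod_startup_latency_tracker entries above: podStartSLOduration equals podStartE2EDuration minus the image-pull window (lastFinishedPulling - firstStartedPulling). For redhat-marketplace-tsnq2, 75.442533885s - (19:28:56.659589661 - 19:28:53.072403041 = 3.587186620s) = 71.855347265s; for redhat-marketplace-jpdcl, 68.527500971s - 4.296588570s = 64.230912401s. Both results match the logged SLO durations exactly, so the SLO figure here is the end-to-end startup time with time spent pulling images excluded.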
kubenswrapper[4926]: I1125 19:28:59.456140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"899c3fbb-eb5b-46b6-b535-27d400c4b40e","Type":"ContainerDied","Data":"4e9abf6ec2d5c003fb5d4992bafa57c4f0331ac2421329cb1146be37b2c5ffdc"} Nov 25 19:28:59 crc kubenswrapper[4926]: I1125 19:28:59.456172 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"899c3fbb-eb5b-46b6-b535-27d400c4b40e","Type":"ContainerStarted","Data":"735357a6a42d842dd999f5645ef0bbca9fc9e78606ac16a377095c23b3d13931"} Nov 25 19:28:59 crc kubenswrapper[4926]: I1125 19:28:59.907239 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]log ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:28:59 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:28:59 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:28:59 crc kubenswrapper[4926]: readyz check failed Nov 25 19:28:59 crc kubenswrapper[4926]: I1125 19:28:59.907634 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:00 crc kubenswrapper[4926]: I1125 19:29:00.313461 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:29:00 crc kubenswrapper[4926]: I1125 19:29:00.313825 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:29:00 crc kubenswrapper[4926]: I1125 19:29:00.378259 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:29:00 crc kubenswrapper[4926]: I1125 19:29:00.469960 4926 generic.go:334] "Generic (PLEG): container finished" podID="7d6bbb74-3796-44c9-a153-84fd8de6f338" 
containerID="cd8e934b1c32c1d795367e8500d9af927d7b7f461a9c65499d156a46364ddfda" exitCode=0 Nov 25 19:29:00 crc kubenswrapper[4926]: I1125 19:29:00.470055 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7d6bbb74-3796-44c9-a153-84fd8de6f338","Type":"ContainerDied","Data":"cd8e934b1c32c1d795367e8500d9af927d7b7f461a9c65499d156a46364ddfda"} Nov 25 19:29:00 crc kubenswrapper[4926]: I1125 19:29:00.470323 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"7d6bbb74-3796-44c9-a153-84fd8de6f338","Type":"ContainerStarted","Data":"a42f4fd926a4d8bfe6e638150c7f7d374eed1d3ad47f7b80c5471e455187a92a"} Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.047191 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hhl9b" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.074460 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-dxhsp" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.095713 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-bpsp8" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.130098 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-8w6rx" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.181509 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.181568 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.182353 4926 scope.go:117] "RemoveContainer" containerID="e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a" Nov 25 19:29:01 crc kubenswrapper[4926]: E1125 19:29:01.182693 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-h55p4_openstack-operators(1df300a3-1d64-4e46-a0b5-9fe0bf029321)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.330923 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-89dkl" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.676780 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-s4wxr" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.714664 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-9f2dg" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.729689 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-k8j22" Nov 25 19:29:01 crc kubenswrapper[4926]: 
I1125 19:29:01.787301 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-mc5kd" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.797647 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-44shk" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.799975 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-5nvnv" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.828849 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-gcvkp" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.854178 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-rslqc" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.854221 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-5nnqb" Nov 25 19:29:01 crc kubenswrapper[4926]: I1125 19:29:01.859986 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-647d45fc97-x65c4" Nov 25 19:29:03 crc kubenswrapper[4926]: I1125 19:29:03.317248 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:29:03 crc kubenswrapper[4926]: I1125 19:29:03.317300 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:29:03 crc kubenswrapper[4926]: I1125 19:29:03.374678 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:29:03 crc kubenswrapper[4926]: I1125 19:29:03.547568 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:29:03 crc kubenswrapper[4926]: I1125 19:29:03.858821 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 25 19:29:03 crc kubenswrapper[4926]: I1125 19:29:03.976023 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-0" Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.013329 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.344978 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.355504 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.509255 4926 generic.go:334] "Generic (PLEG): container finished" podID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" 
containerID="6222abf7d6eadc2029c22566b1527522e45e4346b4cc1b3a71e3ad27c3937f82" exitCode=0 Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.510104 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"7a824532-6df2-4a8d-a6ae-1859686e6bb5","Type":"ContainerDied","Data":"6222abf7d6eadc2029c22566b1527522e45e4346b4cc1b3a71e3ad27c3937f82"} Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.906894 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]log ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:29:04 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:29:04 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:29:04 crc kubenswrapper[4926]: readyz check failed Nov 25 19:29:04 crc kubenswrapper[4926]: I1125 19:29:04.907242 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:05 crc kubenswrapper[4926]: I1125 19:29:05.520273 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"7a824532-6df2-4a8d-a6ae-1859686e6bb5","Type":"ContainerStarted","Data":"42eb3be01662d2aa3302659a5b3fbbc66a30160eea87a351a49747c9eb9b96ff"} Nov 25 19:29:06 crc kubenswrapper[4926]: I1125 19:29:06.329744 4926 scope.go:117] "RemoveContainer" containerID="cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770" Nov 25 19:29:06 crc kubenswrapper[4926]: I1125 19:29:06.329815 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:29:06 crc kubenswrapper[4926]: E1125 19:29:06.330106 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=infra-operator-controller-manager-57548d458d-l4vqr_openstack-operators(c67a3051-deee-4c35-b2fd-73f0f96ccbac)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" podUID="c67a3051-deee-4c35-b2fd-73f0f96ccbac" Nov 25 19:29:06 crc kubenswrapper[4926]: E1125 19:29:06.330132 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:29:06 crc kubenswrapper[4926]: I1125 19:29:06.535405 4926 generic.go:334] "Generic (PLEG): container finished" podID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerID="40b3aa50db332d427a84aa93a155df94b23951f83d0bc8e8aa18b22c3b3bc76a" exitCode=137 Nov 25 19:29:06 crc kubenswrapper[4926]: I1125 19:29:06.535549 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"40b3aa50db332d427a84aa93a155df94b23951f83d0bc8e8aa18b22c3b3bc76a"} Nov 25 19:29:07 crc kubenswrapper[4926]: I1125 19:29:07.551821 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2"} Nov 25 19:29:07 crc kubenswrapper[4926]: I1125 19:29:07.552528 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-notification-agent" containerStatusID={"Type":"cri-o","ID":"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-notification-agent failed liveness probe, will be restarted" Nov 25 19:29:07 crc kubenswrapper[4926]: I1125 19:29:07.552591 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent" containerID="cri-o://70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438" gracePeriod=30 Nov 25 19:29:08 crc kubenswrapper[4926]: I1125 19:29:08.880832 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:08 crc kubenswrapper[4926]: I1125 19:29:08.990433 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:09 crc kubenswrapper[4926]: I1125 19:29:09.002200 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0" Nov 25 19:29:09 crc kubenswrapper[4926]: I1125 19:29:09.029893 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:09 crc kubenswrapper[4926]: I1125 19:29:09.351298 4926 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:09 crc kubenswrapper[4926]: I1125 19:29:09.907196 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]log ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:29:09 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:29:09 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:29:09 crc kubenswrapper[4926]: readyz check failed Nov 25 19:29:09 crc kubenswrapper[4926]: I1125 19:29:09.907602 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:10 crc kubenswrapper[4926]: I1125 19:29:10.397269 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:29:10 crc kubenswrapper[4926]: I1125 19:29:10.462040 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpdcl"] Nov 25 19:29:10 crc kubenswrapper[4926]: I1125 19:29:10.474334 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tsnq2"] Nov 25 19:29:10 crc kubenswrapper[4926]: I1125 19:29:10.474596 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tsnq2" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="registry-server" containerID="cri-o://1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae" gracePeriod=2 Nov 25 19:29:10 crc kubenswrapper[4926]: I1125 19:29:10.589941 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jpdcl" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="registry-server" 
containerID="cri-o://ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266" gracePeriod=2 Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.067655 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.196225 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.256809 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvzfs\" (UniqueName: \"kubernetes.io/projected/164bd684-7cad-4fd5-ae07-32ca3fd631ff-kube-api-access-cvzfs\") pod \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.256987 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-catalog-content\") pod \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.257152 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-utilities\") pod \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\" (UID: \"164bd684-7cad-4fd5-ae07-32ca3fd631ff\") " Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.258666 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-utilities" (OuterVolumeSpecName: "utilities") pod "164bd684-7cad-4fd5-ae07-32ca3fd631ff" (UID: "164bd684-7cad-4fd5-ae07-32ca3fd631ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.263774 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/164bd684-7cad-4fd5-ae07-32ca3fd631ff-kube-api-access-cvzfs" (OuterVolumeSpecName: "kube-api-access-cvzfs") pod "164bd684-7cad-4fd5-ae07-32ca3fd631ff" (UID: "164bd684-7cad-4fd5-ae07-32ca3fd631ff"). InnerVolumeSpecName "kube-api-access-cvzfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.278600 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "164bd684-7cad-4fd5-ae07-32ca3fd631ff" (UID: "164bd684-7cad-4fd5-ae07-32ca3fd631ff"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.358562 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-utilities\") pod \"63cf85c5-12a7-4265-ae81-e968e686668b\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.358664 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zq8r\" (UniqueName: \"kubernetes.io/projected/63cf85c5-12a7-4265-ae81-e968e686668b-kube-api-access-2zq8r\") pod \"63cf85c5-12a7-4265-ae81-e968e686668b\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.358758 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-catalog-content\") pod \"63cf85c5-12a7-4265-ae81-e968e686668b\" (UID: \"63cf85c5-12a7-4265-ae81-e968e686668b\") " Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.359296 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.359307 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/164bd684-7cad-4fd5-ae07-32ca3fd631ff-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.359316 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvzfs\" (UniqueName: \"kubernetes.io/projected/164bd684-7cad-4fd5-ae07-32ca3fd631ff-kube-api-access-cvzfs\") on node \"crc\" DevicePath \"\"" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.359501 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-utilities" (OuterVolumeSpecName: "utilities") pod "63cf85c5-12a7-4265-ae81-e968e686668b" (UID: "63cf85c5-12a7-4265-ae81-e968e686668b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.374814 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63cf85c5-12a7-4265-ae81-e968e686668b-kube-api-access-2zq8r" (OuterVolumeSpecName: "kube-api-access-2zq8r") pod "63cf85c5-12a7-4265-ae81-e968e686668b" (UID: "63cf85c5-12a7-4265-ae81-e968e686668b"). InnerVolumeSpecName "kube-api-access-2zq8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.380779 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63cf85c5-12a7-4265-ae81-e968e686668b" (UID: "63cf85c5-12a7-4265-ae81-e968e686668b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.461441 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.461485 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zq8r\" (UniqueName: \"kubernetes.io/projected/63cf85c5-12a7-4265-ae81-e968e686668b-kube-api-access-2zq8r\") on node \"crc\" DevicePath \"\"" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.461498 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63cf85c5-12a7-4265-ae81-e968e686668b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.602826 4926 generic.go:334] "Generic (PLEG): container finished" podID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerID="1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae" exitCode=0 Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.602894 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerDied","Data":"1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae"} Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.603311 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tsnq2" event={"ID":"164bd684-7cad-4fd5-ae07-32ca3fd631ff","Type":"ContainerDied","Data":"8ccebb53f699a1e03d8484b41d05d8acbdc2f1a87314521a520f725811622042"} Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.603339 4926 scope.go:117] "RemoveContainer" containerID="1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.602912 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tsnq2" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.610179 4926 generic.go:334] "Generic (PLEG): container finished" podID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerID="70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438" exitCode=0 Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.610438 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"} Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.610562 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerStarted","Data":"9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54"} Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.638718 4926 generic.go:334] "Generic (PLEG): container finished" podID="63cf85c5-12a7-4265-ae81-e968e686668b" containerID="ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266" exitCode=0 Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.638789 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerDied","Data":"ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266"} Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.638827 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpdcl" event={"ID":"63cf85c5-12a7-4265-ae81-e968e686668b","Type":"ContainerDied","Data":"13272c45fcbec3780642d447737257cbdd2f08e0245e0e66d6441ce70d34cd11"} Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.638920 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpdcl" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.707556 4926 scope.go:117] "RemoveContainer" containerID="d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.807469 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tsnq2"] Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.813992 4926 scope.go:117] "RemoveContainer" containerID="ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.818615 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tsnq2"] Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.828563 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpdcl"] Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.837731 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpdcl"] Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.841491 4926 scope.go:117] "RemoveContainer" containerID="1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae" Nov 25 19:29:11 crc kubenswrapper[4926]: E1125 19:29:11.841887 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae\": container with ID starting with 1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae not found: ID does not exist" containerID="1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.841929 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae"} err="failed to get container status \"1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae\": rpc error: code = NotFound desc = could not find container \"1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae\": container with ID starting with 1d650df369f26ca7f000ced635b46acb6c9602ebe1723f2a99389141b61548ae not found: ID does not exist" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.841957 4926 scope.go:117] "RemoveContainer" containerID="d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58" Nov 25 19:29:11 crc kubenswrapper[4926]: E1125 19:29:11.842215 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58\": container with ID starting with d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58 not found: ID does not exist" containerID="d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.842246 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58"} err="failed to get container status \"d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58\": rpc error: code = NotFound desc = could not find container \"d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58\": container with ID starting with 
d5bc20e163f27db33a7a9083bc3048cd8b13994a5eaaa0108f2865d5ebb4bf58 not found: ID does not exist" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.842269 4926 scope.go:117] "RemoveContainer" containerID="ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0" Nov 25 19:29:11 crc kubenswrapper[4926]: E1125 19:29:11.842576 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0\": container with ID starting with ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0 not found: ID does not exist" containerID="ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.842600 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0"} err="failed to get container status \"ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0\": rpc error: code = NotFound desc = could not find container \"ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0\": container with ID starting with ed05e82e50303a8311d9a3ca4d9b93c84098cc2f4de25493131ab3d079a139d0 not found: ID does not exist" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.842615 4926 scope.go:117] "RemoveContainer" containerID="ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.896420 4926 scope.go:117] "RemoveContainer" containerID="b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.917716 4926 scope.go:117] "RemoveContainer" containerID="d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.971862 4926 scope.go:117] "RemoveContainer" containerID="ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266" Nov 25 19:29:11 crc kubenswrapper[4926]: E1125 19:29:11.972344 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266\": container with ID starting with ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266 not found: ID does not exist" containerID="ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.972398 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266"} err="failed to get container status \"ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266\": rpc error: code = NotFound desc = could not find container \"ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266\": container with ID starting with ce86c607685bc043e72ee8ddf029da60bb3c97c07f2d641ea13c0f65c9eb2266 not found: ID does not exist" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.972425 4926 scope.go:117] "RemoveContainer" containerID="b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc" Nov 25 19:29:11 crc kubenswrapper[4926]: E1125 19:29:11.973951 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc\": container 
with ID starting with b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc not found: ID does not exist" containerID="b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.973975 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc"} err="failed to get container status \"b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc\": rpc error: code = NotFound desc = could not find container \"b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc\": container with ID starting with b0d94defd86e7fbc9ae8d737c8d0b24d71e08de2a51ea65c56ce5b18dc324bbc not found: ID does not exist" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.974025 4926 scope.go:117] "RemoveContainer" containerID="d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0" Nov 25 19:29:11 crc kubenswrapper[4926]: E1125 19:29:11.974513 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0\": container with ID starting with d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0 not found: ID does not exist" containerID="d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0" Nov 25 19:29:11 crc kubenswrapper[4926]: I1125 19:29:11.974541 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0"} err="failed to get container status \"d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0\": rpc error: code = NotFound desc = could not find container \"d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0\": container with ID starting with d36a48deff8caebcffa16363b330cad503c259faa3a17f8de734e0d24805b0b0 not found: ID does not exist" Nov 25 19:29:12 crc kubenswrapper[4926]: I1125 19:29:12.330699 4926 scope.go:117] "RemoveContainer" containerID="e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a" Nov 25 19:29:12 crc kubenswrapper[4926]: E1125 19:29:12.331138 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-h55p4_openstack-operators(1df300a3-1d64-4e46-a0b5-9fe0bf029321)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" podUID="1df300a3-1d64-4e46-a0b5-9fe0bf029321" Nov 25 19:29:12 crc kubenswrapper[4926]: I1125 19:29:12.349594 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" path="/var/lib/kubelet/pods/164bd684-7cad-4fd5-ae07-32ca3fd631ff/volumes" Nov 25 19:29:12 crc kubenswrapper[4926]: I1125 19:29:12.350920 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" path="/var/lib/kubelet/pods/63cf85c5-12a7-4265-ae81-e968e686668b/volumes" Nov 25 19:29:13 crc kubenswrapper[4926]: I1125 19:29:13.891242 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:14 crc kubenswrapper[4926]: I1125 19:29:14.006529 4926 
prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:14 crc kubenswrapper[4926]: I1125 19:29:14.026241 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:14 crc kubenswrapper[4926]: I1125 19:29:14.370103 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:14 crc kubenswrapper[4926]: I1125 19:29:14.908086 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]log ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:29:14 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:29:14 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:29:14 crc kubenswrapper[4926]: readyz check failed Nov 25 19:29:14 crc kubenswrapper[4926]: I1125 19:29:14.908133 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:17 crc kubenswrapper[4926]: I1125 19:29:17.329637 4926 scope.go:117] "RemoveContainer" containerID="cc5ae4a3c37a043d840d4e31b62bb3ae61c3b9659b55dfc49d3cb47098884770" Nov 25 19:29:17 crc kubenswrapper[4926]: I1125 19:29:17.712738 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" event={"ID":"c67a3051-deee-4c35-b2fd-73f0f96ccbac","Type":"ContainerStarted","Data":"1e35304a8aebdfa1265a6b770c131ace8468842e1276c4c0cf0bfc02cbf30193"} Nov 25 
19:29:17 crc kubenswrapper[4926]: I1125 19:29:17.713439 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:29:18 crc kubenswrapper[4926]: I1125 19:29:18.330211 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:29:18 crc kubenswrapper[4926]: E1125 19:29:18.330813 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:29:18 crc kubenswrapper[4926]: I1125 19:29:18.887986 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:19 crc kubenswrapper[4926]: I1125 19:29:19.006966 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:19 crc kubenswrapper[4926]: I1125 19:29:19.032842 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:19 crc kubenswrapper[4926]: I1125 19:29:19.363220 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:19 crc kubenswrapper[4926]: I1125 19:29:19.911159 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]log ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 19:29:19 crc kubenswrapper[4926]: 
[+]poststarthook/openshift.io-startinformers ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 19:29:19 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 19:29:19 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld Nov 25 19:29:19 crc kubenswrapper[4926]: readyz check failed Nov 25 19:29:19 crc kubenswrapper[4926]: I1125 19:29:19.911725 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:23 crc kubenswrapper[4926]: I1125 19:29:23.048689 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-l4vqr" Nov 25 19:29:23 crc kubenswrapper[4926]: I1125 19:29:23.911908 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:24 crc kubenswrapper[4926]: I1125 19:29:24.020929 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:24 crc kubenswrapper[4926]: I1125 19:29:24.050192 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:24 crc kubenswrapper[4926]: I1125 19:29:24.360643 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 19:29:24 crc kubenswrapper[4926]: I1125 19:29:24.910894 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]log ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]etcd excluded: ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]informer-sync ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 
25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok
Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 19:29:24 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 19:29:24 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld
Nov 25 19:29:24 crc kubenswrapper[4926]: readyz check failed
Nov 25 19:29:24 crc kubenswrapper[4926]: I1125 19:29:24.911950 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:26 crc kubenswrapper[4926]: I1125 19:29:26.329021 4926 scope.go:117] "RemoveContainer" containerID="e6d6051fdf0766ea2e164abcf85bc87a48912a46c04d51951693779ba109cb5a"
Nov 25 19:29:26 crc kubenswrapper[4926]: I1125 19:29:26.825748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4" event={"ID":"1df300a3-1d64-4e46-a0b5-9fe0bf029321","Type":"ContainerStarted","Data":"c5bcf5ab72d2e5dde45dc893ff5a7c097723741667583c5db0363ad30c4bb833"}
Nov 25 19:29:26 crc kubenswrapper[4926]: I1125 19:29:26.826358 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4"
Nov 25 19:29:28 crc kubenswrapper[4926]: I1125 19:29:28.881313 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:28 crc kubenswrapper[4926]: I1125 19:29:28.991646 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:29 crc kubenswrapper[4926]: I1125 19:29:29.022920 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:29 crc kubenswrapper[4926]: I1125 19:29:29.329947 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:29:29 crc kubenswrapper[4926]: E1125 19:29:29.330396 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:29:29 crc kubenswrapper[4926]: I1125 19:29:29.510667 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:29 crc kubenswrapper[4926]: I1125 19:29:29.906407 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]log ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]etcd excluded: ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]informer-sync ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 19:29:29 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld
Nov 25 19:29:29 crc kubenswrapper[4926]: readyz check failed
Nov 25 19:29:29 crc kubenswrapper[4926]: I1125 19:29:29.907457 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:31 crc kubenswrapper[4926]: I1125 19:29:31.184455 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-h55p4"
Nov 25 19:29:33 crc kubenswrapper[4926]: I1125 19:29:33.795144 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:33 crc kubenswrapper[4926]: I1125 19:29:33.795565 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="proxy-httpd" containerID="cri-o://aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569" gracePeriod=30
Nov 25 19:29:33 crc kubenswrapper[4926]: I1125 19:29:33.795598 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent" containerID="cri-o://360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2" gracePeriod=30
Nov 25 19:29:33 crc kubenswrapper[4926]: I1125 19:29:33.796044 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent" containerID="cri-o://9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54" gracePeriod=30
Nov 25 19:29:33 crc kubenswrapper[4926]: I1125 19:29:33.795487 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="sg-core" containerID="cri-o://9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad" gracePeriod=30
Nov 25 19:29:33 crc kubenswrapper[4926]: I1125 19:29:33.884137 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-backup-0" podUID="7d6bbb74-3796-44c9-a153-84fd8de6f338" containerName="cinder-backup" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.011306 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-0" podUID="899c3fbb-eb5b-46b6-b535-27d400c4b40e" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.027581 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-volume-nfs-2-0" podUID="7a824532-6df2-4a8d-a6ae-1859686e6bb5" containerName="cinder-volume" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.356005 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="ad6a0baa-57a5-47d8-81fc-4395a6f4079a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.909081 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]log ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]etcd excluded: ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]informer-sync ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-bootstrapclusterroles ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 19:29:34 crc kubenswrapper[4926]: [-]shutdown failed: reason withheld
Nov 25 19:29:34 crc kubenswrapper[4926]: readyz check failed
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.909166 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916443 4926 generic.go:334] "Generic (PLEG): container finished" podID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerID="360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2" exitCode=0
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916483 4926 generic.go:334] "Generic (PLEG): container finished" podID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerID="aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569" exitCode=0
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916496 4926 generic.go:334] "Generic (PLEG): container finished" podID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerID="9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad" exitCode=2
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2"}
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916746 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569"}
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916772 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad"}
Nov 25 19:29:34 crc kubenswrapper[4926]: I1125 19:29:34.916794 4926 scope.go:117] "RemoveContainer" containerID="40b3aa50db332d427a84aa93a155df94b23951f83d0bc8e8aa18b22c3b3bc76a"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.716177 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.852349 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.857697 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.938086 4926 generic.go:334] "Generic (PLEG): container finished" podID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerID="9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54" exitCode=0
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.939013 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.939593 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54"}
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.939657 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e92da70d-89aa-4e1f-9961-3cb2334fc573","Type":"ContainerDied","Data":"522103df9fee15e565cc30348a25626ac7c4efe1b996e56c6a692d4a2093834b"}
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.939681 4926 scope.go:117] "RemoveContainer" containerID="9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.970324 4926 scope.go:117] "RemoveContainer" containerID="360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2"
Nov 25 19:29:35 crc kubenswrapper[4926]: I1125 19:29:35.997419 4926 scope.go:117] "RemoveContainer" containerID="aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.023761 4926 scope.go:117] "RemoveContainer" containerID="9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.044649 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-sg-core-conf-yaml\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.044713 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-config-data\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.044851 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-log-httpd\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.044948 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-ceilometer-tls-certs\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.044987 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-combined-ca-bundle\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.045030 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-scripts\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.045178 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-run-httpd\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.045233 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxj4t\" (UniqueName: \"kubernetes.io/projected/e92da70d-89aa-4e1f-9961-3cb2334fc573-kube-api-access-fxj4t\") pod \"e92da70d-89aa-4e1f-9961-3cb2334fc573\" (UID: \"e92da70d-89aa-4e1f-9961-3cb2334fc573\") "
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.047197 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.048022 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.048512 4926 scope.go:117] "RemoveContainer" containerID="70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.050759 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e92da70d-89aa-4e1f-9961-3cb2334fc573-kube-api-access-fxj4t" (OuterVolumeSpecName: "kube-api-access-fxj4t") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "kube-api-access-fxj4t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.056970 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-scripts" (OuterVolumeSpecName: "scripts") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.085884 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.111481 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.147494 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.147531 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxj4t\" (UniqueName: \"kubernetes.io/projected/e92da70d-89aa-4e1f-9961-3cb2334fc573-kube-api-access-fxj4t\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.147546 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.147558 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e92da70d-89aa-4e1f-9961-3cb2334fc573-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.147569 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.147581 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.158772 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.188691 4926 scope.go:117] "RemoveContainer" containerID="9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.189589 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54\": container with ID starting with 9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54 not found: ID does not exist" containerID="9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.189638 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54"} err="failed to get container status \"9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54\": rpc error: code = NotFound desc = could not find container \"9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54\": container with ID starting with 9f46981793ea4996b21d65faf8c8e5aa9350bc40f97e04c1b171256e7701be54 not found: ID does not exist"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.189667 4926 scope.go:117] "RemoveContainer" containerID="360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.190233 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2\": container with ID starting with 360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2 not found: ID does not exist" containerID="360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.190295 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2"} err="failed to get container status \"360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2\": rpc error: code = NotFound desc = could not find container \"360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2\": container with ID starting with 360db3b7ba339fd95b1f74e37893db364e37f013a1bb7ca33b29bf7b84b66cf2 not found: ID does not exist"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.190424 4926 scope.go:117] "RemoveContainer" containerID="aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.190862 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569\": container with ID starting with aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569 not found: ID does not exist" containerID="aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.190919 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569"} err="failed to get container status \"aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569\": rpc error: code = NotFound desc = could not find container \"aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569\": container with ID starting with aa7c8019de6fa2fee794dee9c37a6231f0cdd56bc04a8cb2fdc6a1f6d4d76569 not found: ID does not exist"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.190948 4926 scope.go:117] "RemoveContainer" containerID="9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.191288 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad\": container with ID starting with 9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad not found: ID does not exist" containerID="9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.191313 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad"} err="failed to get container status \"9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad\": rpc error: code = NotFound desc = could not find container \"9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad\": container with ID starting with 9353869f24592cbe2e16d42326d37449de753d06d800da0f2b5a0152a9e04aad not found: ID does not exist"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.191328 4926 scope.go:117] "RemoveContainer" containerID="70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.192034 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438\": container with ID starting with 70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438 not found: ID does not exist" containerID="70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.192081 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438"} err="failed to get container status \"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438\": rpc error: code = NotFound desc = could not find container \"70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438\": container with ID starting with 70eb42df35518230bbd66f167cfc06f67bae2e86435661fa4b9d212d5e8c5438 not found: ID does not exist"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.216179 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-config-data" (OuterVolumeSpecName: "config-data") pod "e92da70d-89aa-4e1f-9961-3cb2334fc573" (UID: "e92da70d-89aa-4e1f-9961-3cb2334fc573"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.248987 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.249017 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e92da70d-89aa-4e1f-9961-3cb2334fc573-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.279273 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.291432 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311252 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311646 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="proxy-httpd"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311664 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="proxy-httpd"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311678 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311685 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311705 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311712 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311722 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311728 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311735 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="extract-content"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311740 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="extract-content"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311751 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="extract-utilities"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311757 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="extract-utilities"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311770 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="extract-utilities"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311776 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="extract-utilities"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311790 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="registry-server"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311796 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="registry-server"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311807 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="sg-core"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311812 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="sg-core"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311825 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="registry-server"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311831 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="registry-server"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.311845 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="extract-content"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.311850 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="extract-content"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312024 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="63cf85c5-12a7-4265-ae81-e968e686668b" containerName="registry-server"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312041 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312050 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312066 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="sg-core"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312076 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312087 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="164bd684-7cad-4fd5-ae07-32ca3fd631ff" containerName="registry-server"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312094 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="proxy-httpd"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312103 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-central-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: E1125 19:29:36.312284 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.312290 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" containerName="ceilometer-notification-agent"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.313837 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.317684 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.317721 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.317871 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.359619 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e92da70d-89aa-4e1f-9961-3cb2334fc573" path="/var/lib/kubelet/pods/e92da70d-89aa-4e1f-9961-3cb2334fc573/volumes"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.360596 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452609 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452688 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-config-data\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452733 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-scripts\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452753 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452870 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-log-httpd\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452946 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.452985 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-run-httpd\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.453031 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbl28\" (UniqueName: \"kubernetes.io/projected/7ea24df3-75b6-4b45-9357-b0af0af7ae30-kube-api-access-qbl28\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555289 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbl28\" (UniqueName: \"kubernetes.io/projected/7ea24df3-75b6-4b45-9357-b0af0af7ae30-kube-api-access-qbl28\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555429 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555484 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-config-data\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555550 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-scripts\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555601 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555725 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-log-httpd\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.555889 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-run-httpd\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.556652 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-log-httpd\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.556740 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-run-httpd\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.560448 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-scripts\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.561404 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.561998 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-config-data\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.562423 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.563474 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.584631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbl28\" (UniqueName: \"kubernetes.io/projected/7ea24df3-75b6-4b45-9357-b0af0af7ae30-kube-api-access-qbl28\") pod \"ceilometer-0\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " pod="openstack/ceilometer-0"
Nov 25 19:29:36 crc kubenswrapper[4926]: I1125 19:29:36.629692 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 19:29:37 crc kubenswrapper[4926]: I1125 19:29:37.162322 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:37 crc kubenswrapper[4926]: I1125 19:29:37.957885 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerStarted","Data":"0bb5ab612fb0842fd3b5b8295035f432a9ba36df0a8c334f126cc51eea3469ab"}
Nov 25 19:29:37 crc kubenswrapper[4926]: I1125 19:29:37.958480 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerStarted","Data":"29853106a4286bd6d4d6deb73bbebc3deaadaf8e4dfea1da489bae9a322a08f4"}
Nov 25 19:29:37 crc kubenswrapper[4926]: I1125 19:29:37.958492 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerStarted","Data":"386962085c28c620a0b45aea56ba7acf8e5096b48529c41ce9fe1be6deac4755"}
Nov 25 19:29:38 crc kubenswrapper[4926]: I1125 19:29:38.164128 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 19:29:38 crc kubenswrapper[4926]: I1125 19:29:38.885421 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.019771 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-0"
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.036633 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-2-0"
Nov 25 19:29:39 crc kubenswrapper[4926]: E1125 19:29:39.245448 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod900fd29e_9f47_40d8_b232_fca71cd10642.slice/crio-conmon-9435cbb74905b6ef93c6b440d7d9dc2e06bd13078770eb2d09f7981918b491a3.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.368010 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.982669 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerStarted","Data":"d59715b984c2711315fbe403dab6de06b629d67034cd8d06755d083fa9ba190b"}
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.985864 4926 generic.go:334] "Generic (PLEG): container finished" podID="900fd29e-9f47-40d8-b232-fca71cd10642" containerID="9435cbb74905b6ef93c6b440d7d9dc2e06bd13078770eb2d09f7981918b491a3" exitCode=0
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.985915 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" event={"ID":"900fd29e-9f47-40d8-b232-fca71cd10642","Type":"ContainerDied","Data":"9435cbb74905b6ef93c6b440d7d9dc2e06bd13078770eb2d09f7981918b491a3"}
Nov 25 19:29:39 crc kubenswrapper[4926]: I1125 19:29:39.985962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" event={"ID":"900fd29e-9f47-40d8-b232-fca71cd10642","Type":"ContainerStarted","Data":"d9d776327a70dd5fb83f2107c80dd234efd6f399b61521dd308129228a83d643"}
Nov 25 19:29:40 crc kubenswrapper[4926]: I1125 19:29:40.519931 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4pxkr container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]log ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]etcd excluded: ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]etcd-readiness excluded: ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [-]informer-sync failed: reason withheld
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/max-in-flight-filter ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 25 19:29:40 crc kubenswrapper[4926]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-startinformers ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 19:29:40 crc kubenswrapper[4926]: [+]shutdown ok
Nov 25 19:29:40 crc kubenswrapper[4926]: readyz check failed
Nov 25 19:29:40 crc kubenswrapper[4926]: I1125 19:29:40.520318 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr" podUID="900fd29e-9f47-40d8-b232-fca71cd10642" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.006600 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerStarted","Data":"221f1d64d72c51bda539e431a184f009f21aeafddf31440d9c3a3d3006e4213b"}
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.007143 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-central-agent" containerID="cri-o://29853106a4286bd6d4d6deb73bbebc3deaadaf8e4dfea1da489bae9a322a08f4" gracePeriod=30
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.007632 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.007693 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="proxy-httpd" containerID="cri-o://221f1d64d72c51bda539e431a184f009f21aeafddf31440d9c3a3d3006e4213b" gracePeriod=30
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.007781 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="sg-core" containerID="cri-o://d59715b984c2711315fbe403dab6de06b629d67034cd8d06755d083fa9ba190b" gracePeriod=30
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.007824 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-notification-agent" containerID="cri-o://0bb5ab612fb0842fd3b5b8295035f432a9ba36df0a8c334f126cc51eea3469ab" gracePeriod=30
Nov 25 19:29:41 crc kubenswrapper[4926]: I1125 19:29:41.036267 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.946288891 podStartE2EDuration="5.036248803s" podCreationTimestamp="2025-11-25 19:29:36 +0000 UTC" firstStartedPulling="2025-11-25 19:29:37.163049762 +0000 UTC m=+4607.548563377" lastFinishedPulling="2025-11-25 19:29:40.253009674 +0000 UTC m=+4610.638523289" observedRunningTime="2025-11-25 19:29:41.024382135 +0000 UTC m=+4611.409895740" watchObservedRunningTime="2025-11-25 19:29:41.036248803 +0000 UTC m=+4611.421762398"
Nov 25 19:29:42 crc kubenswrapper[4926]: I1125 19:29:42.043364 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerID="d59715b984c2711315fbe403dab6de06b629d67034cd8d06755d083fa9ba190b" exitCode=2
Nov 25 19:29:42 crc kubenswrapper[4926]: I1125 19:29:42.043755 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerID="0bb5ab612fb0842fd3b5b8295035f432a9ba36df0a8c334f126cc51eea3469ab" exitCode=0
Nov 25 19:29:42 crc kubenswrapper[4926]: I1125 19:29:42.043787 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerDied","Data":"d59715b984c2711315fbe403dab6de06b629d67034cd8d06755d083fa9ba190b"}
Nov 25 19:29:42 crc kubenswrapper[4926]: I1125 19:29:42.043811 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerDied","Data":"0bb5ab612fb0842fd3b5b8295035f432a9ba36df0a8c334f126cc51eea3469ab"}
Nov 25 19:29:42 crc kubenswrapper[4926]: I1125 19:29:42.329332 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:29:42 crc kubenswrapper[4926]: E1125 19:29:42.329936 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:29:44 crc kubenswrapper[4926]: I1125 19:29:44.903260 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 19:29:44 crc kubenswrapper[4926]: I1125 19:29:44.903810 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 19:29:44 crc kubenswrapper[4926]: I1125 19:29:44.907639 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 19:29:45 crc kubenswrapper[4926]: I1125 19:29:45.078105 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-4pxkr"
Nov 25 19:29:49 crc kubenswrapper[4926]: I1125 19:29:49.110465 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerID="29853106a4286bd6d4d6deb73bbebc3deaadaf8e4dfea1da489bae9a322a08f4" exitCode=0
Nov 25 19:29:49 crc kubenswrapper[4926]: I1125 19:29:49.111078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerDied","Data":"29853106a4286bd6d4d6deb73bbebc3deaadaf8e4dfea1da489bae9a322a08f4"}
Nov 25 19:29:57 crc kubenswrapper[4926]: I1125 19:29:57.329327 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:29:57 crc kubenswrapper[4926]: E1125 19:29:57.330235 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.195479 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"]
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.197124 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.201612 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.201613 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.214756 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"]
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.328740 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lf45\" (UniqueName: \"kubernetes.io/projected/874de401-fcce-4271-8d44-3df388134a89-kube-api-access-7lf45\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.328823 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/874de401-fcce-4271-8d44-3df388134a89-secret-volume\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.329215 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/874de401-fcce-4271-8d44-3df388134a89-config-volume\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.431048 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/874de401-fcce-4271-8d44-3df388134a89-secret-volume\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.431620 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/874de401-fcce-4271-8d44-3df388134a89-config-volume\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.431758 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lf45\" (UniqueName: \"kubernetes.io/projected/874de401-fcce-4271-8d44-3df388134a89-kube-api-access-7lf45\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.432492 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/874de401-fcce-4271-8d44-3df388134a89-config-volume\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.975093 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/874de401-fcce-4271-8d44-3df388134a89-secret-volume\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:00 crc kubenswrapper[4926]: I1125 19:30:00.981424 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lf45\" (UniqueName: \"kubernetes.io/projected/874de401-fcce-4271-8d44-3df388134a89-kube-api-access-7lf45\") pod \"collect-profiles-29401650-5t6jr\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:01 crc kubenswrapper[4926]: I1125 19:30:01.127212 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:01 crc kubenswrapper[4926]: I1125 19:30:01.618224 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"]
Nov 25 19:30:02 crc kubenswrapper[4926]: I1125 19:30:02.271910 4926 generic.go:334] "Generic (PLEG): container finished" podID="874de401-fcce-4271-8d44-3df388134a89" containerID="6a65ffbe0f74eefd3dd0f67e0a74ed9178c98e60be76f3ae986a4ff69380e78c" exitCode=0
Nov 25 19:30:02 crc kubenswrapper[4926]: I1125 19:30:02.272018 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr" event={"ID":"874de401-fcce-4271-8d44-3df388134a89","Type":"ContainerDied","Data":"6a65ffbe0f74eefd3dd0f67e0a74ed9178c98e60be76f3ae986a4ff69380e78c"}
Nov 25 19:30:02 crc kubenswrapper[4926]: I1125 19:30:02.272346 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr" event={"ID":"874de401-fcce-4271-8d44-3df388134a89","Type":"ContainerStarted","Data":"a81babf27a6444b1ff9298f5a65e11681fbaab1784b3e6b768d5e3ed20d29151"}
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.714052 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.811310 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lf45\" (UniqueName: \"kubernetes.io/projected/874de401-fcce-4271-8d44-3df388134a89-kube-api-access-7lf45\") pod \"874de401-fcce-4271-8d44-3df388134a89\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") "
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.811374 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/874de401-fcce-4271-8d44-3df388134a89-secret-volume\") pod \"874de401-fcce-4271-8d44-3df388134a89\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") "
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.811577 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/874de401-fcce-4271-8d44-3df388134a89-config-volume\") pod \"874de401-fcce-4271-8d44-3df388134a89\" (UID: \"874de401-fcce-4271-8d44-3df388134a89\") "
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.812180 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/874de401-fcce-4271-8d44-3df388134a89-config-volume" (OuterVolumeSpecName: "config-volume") pod "874de401-fcce-4271-8d44-3df388134a89" (UID: "874de401-fcce-4271-8d44-3df388134a89"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.817083 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/874de401-fcce-4271-8d44-3df388134a89-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "874de401-fcce-4271-8d44-3df388134a89" (UID: "874de401-fcce-4271-8d44-3df388134a89"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.817545 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/874de401-fcce-4271-8d44-3df388134a89-kube-api-access-7lf45" (OuterVolumeSpecName: "kube-api-access-7lf45") pod "874de401-fcce-4271-8d44-3df388134a89" (UID: "874de401-fcce-4271-8d44-3df388134a89"). InnerVolumeSpecName "kube-api-access-7lf45". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.914312 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/874de401-fcce-4271-8d44-3df388134a89-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.914358 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lf45\" (UniqueName: \"kubernetes.io/projected/874de401-fcce-4271-8d44-3df388134a89-kube-api-access-7lf45\") on node \"crc\" DevicePath \"\""
Nov 25 19:30:03 crc kubenswrapper[4926]: I1125 19:30:03.914391 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/874de401-fcce-4271-8d44-3df388134a89-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 19:30:04 crc kubenswrapper[4926]: I1125 19:30:04.292636 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr" event={"ID":"874de401-fcce-4271-8d44-3df388134a89","Type":"ContainerDied","Data":"a81babf27a6444b1ff9298f5a65e11681fbaab1784b3e6b768d5e3ed20d29151"}
Nov 25 19:30:04 crc kubenswrapper[4926]: I1125 19:30:04.292922 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a81babf27a6444b1ff9298f5a65e11681fbaab1784b3e6b768d5e3ed20d29151"
Nov 25 19:30:04 crc kubenswrapper[4926]: I1125 19:30:04.292973 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"
Nov 25 19:30:04 crc kubenswrapper[4926]: I1125 19:30:04.817134 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"]
Nov 25 19:30:04 crc kubenswrapper[4926]: I1125 19:30:04.825879 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401605-mnsnh"]
Nov 25 19:30:06 crc kubenswrapper[4926]: I1125 19:30:06.340441 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf" path="/var/lib/kubelet/pods/f6d0e8b8-a0d2-40dd-b6a8-edc82afa2baf/volumes"
Nov 25 19:30:06 crc kubenswrapper[4926]: I1125 19:30:06.652685 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 25 19:30:10 crc kubenswrapper[4926]: I1125 19:30:10.335726 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e"
Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.419137 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerID="221f1d64d72c51bda539e431a184f009f21aeafddf31440d9c3a3d3006e4213b" exitCode=137
Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.420758 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerDied","Data":"221f1d64d72c51bda539e431a184f009f21aeafddf31440d9c3a3d3006e4213b"}
Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.666837 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.681104 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-run-httpd\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.681209 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-ceilometer-tls-certs\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.681277 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-combined-ca-bundle\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.681342 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-config-data\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.681874 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.682297 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-scripts\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.682338 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-sg-core-conf-yaml\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.682362 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-log-httpd\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.682408 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbl28\" (UniqueName: \"kubernetes.io/projected/7ea24df3-75b6-4b45-9357-b0af0af7ae30-kube-api-access-qbl28\") pod \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\" (UID: \"7ea24df3-75b6-4b45-9357-b0af0af7ae30\") " Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.683076 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.683196 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.693993 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-scripts" (OuterVolumeSpecName: "scripts") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.703249 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ea24df3-75b6-4b45-9357-b0af0af7ae30-kube-api-access-qbl28" (OuterVolumeSpecName: "kube-api-access-qbl28") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "kube-api-access-qbl28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.782194 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.785279 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.785306 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.785319 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7ea24df3-75b6-4b45-9357-b0af0af7ae30-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.785334 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbl28\" (UniqueName: \"kubernetes.io/projected/7ea24df3-75b6-4b45-9357-b0af0af7ae30-kube-api-access-qbl28\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.809383 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.834617 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.876217 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-config-data" (OuterVolumeSpecName: "config-data") pod "7ea24df3-75b6-4b45-9357-b0af0af7ae30" (UID: "7ea24df3-75b6-4b45-9357-b0af0af7ae30"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.886890 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.886922 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:11 crc kubenswrapper[4926]: I1125 19:30:11.886934 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea24df3-75b6-4b45-9357-b0af0af7ae30-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.437715 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"51d8b9a7de314ce38698ed287c632a9fa8ce64b92f6f9d12c4dc62774aed87ef"} Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.446067 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7ea24df3-75b6-4b45-9357-b0af0af7ae30","Type":"ContainerDied","Data":"386962085c28c620a0b45aea56ba7acf8e5096b48529c41ce9fe1be6deac4755"} Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.446111 4926 scope.go:117] "RemoveContainer" containerID="221f1d64d72c51bda539e431a184f009f21aeafddf31440d9c3a3d3006e4213b" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.446242 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.487082 4926 scope.go:117] "RemoveContainer" containerID="d59715b984c2711315fbe403dab6de06b629d67034cd8d06755d083fa9ba190b" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.512105 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.529997 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.550466 4926 scope.go:117] "RemoveContainer" containerID="0bb5ab612fb0842fd3b5b8295035f432a9ba36df0a8c334f126cc51eea3469ab" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585040 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 19:30:12 crc kubenswrapper[4926]: E1125 19:30:12.585561 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-central-agent" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585578 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-central-agent" Nov 25 19:30:12 crc kubenswrapper[4926]: E1125 19:30:12.585604 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="sg-core" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585613 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="sg-core" Nov 25 19:30:12 crc kubenswrapper[4926]: E1125 19:30:12.585649 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="874de401-fcce-4271-8d44-3df388134a89" containerName="collect-profiles" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585657 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="874de401-fcce-4271-8d44-3df388134a89" containerName="collect-profiles" Nov 25 19:30:12 crc kubenswrapper[4926]: E1125 19:30:12.585679 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-notification-agent" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585688 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-notification-agent" Nov 25 19:30:12 crc kubenswrapper[4926]: E1125 19:30:12.585706 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="proxy-httpd" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585713 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="proxy-httpd" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585956 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-notification-agent" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585974 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="874de401-fcce-4271-8d44-3df388134a89" containerName="collect-profiles" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.585998 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="proxy-httpd" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.586021 4926 
memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="ceilometer-central-agent" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.586030 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" containerName="sg-core" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.588228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.598654 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.598832 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.601131 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605391 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605456 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9803cc53-b277-4aec-af80-cab4b4638a02-run-httpd\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605548 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vsxn\" (UniqueName: \"kubernetes.io/projected/9803cc53-b277-4aec-af80-cab4b4638a02-kube-api-access-9vsxn\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605605 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-scripts\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605646 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605679 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9803cc53-b277-4aec-af80-cab4b4638a02-log-httpd\") pod 
\"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.605711 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-config-data\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.617080 4926 scope.go:117] "RemoveContainer" containerID="29853106a4286bd6d4d6deb73bbebc3deaadaf8e4dfea1da489bae9a322a08f4" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.618788 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.707928 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9803cc53-b277-4aec-af80-cab4b4638a02-run-httpd\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708010 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708038 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vsxn\" (UniqueName: \"kubernetes.io/projected/9803cc53-b277-4aec-af80-cab4b4638a02-kube-api-access-9vsxn\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708098 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-scripts\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708131 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708168 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9803cc53-b277-4aec-af80-cab4b4638a02-log-httpd\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708200 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-config-data\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708275 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-sg-core-conf-yaml\") pod \"ceilometer-0\" 
(UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9803cc53-b277-4aec-af80-cab4b4638a02-run-httpd\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:12 crc kubenswrapper[4926]: I1125 19:30:12.708733 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9803cc53-b277-4aec-af80-cab4b4638a02-log-httpd\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.382714 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.383198 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-scripts\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.384431 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.384527 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.384696 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vsxn\" (UniqueName: \"kubernetes.io/projected/9803cc53-b277-4aec-af80-cab4b4638a02-kube-api-access-9vsxn\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.392612 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9803cc53-b277-4aec-af80-cab4b4638a02-config-data\") pod \"ceilometer-0\" (UID: \"9803cc53-b277-4aec-af80-cab4b4638a02\") " pod="openstack/ceilometer-0" Nov 25 19:30:13 crc kubenswrapper[4926]: I1125 19:30:13.579467 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 19:30:14 crc kubenswrapper[4926]: W1125 19:30:14.100705 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9803cc53_b277_4aec_af80_cab4b4638a02.slice/crio-3eb1ea86b85bba33622f14bb22fe1b300f5fd2e6264ffd1217a19ce97867c9cf WatchSource:0}: Error finding container 3eb1ea86b85bba33622f14bb22fe1b300f5fd2e6264ffd1217a19ce97867c9cf: Status 404 returned error can't find the container with id 3eb1ea86b85bba33622f14bb22fe1b300f5fd2e6264ffd1217a19ce97867c9cf Nov 25 19:30:14 crc kubenswrapper[4926]: I1125 19:30:14.107628 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 19:30:14 crc kubenswrapper[4926]: I1125 19:30:14.338492 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ea24df3-75b6-4b45-9357-b0af0af7ae30" path="/var/lib/kubelet/pods/7ea24df3-75b6-4b45-9357-b0af0af7ae30/volumes" Nov 25 19:30:14 crc kubenswrapper[4926]: I1125 19:30:14.503104 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9803cc53-b277-4aec-af80-cab4b4638a02","Type":"ContainerStarted","Data":"3eb1ea86b85bba33622f14bb22fe1b300f5fd2e6264ffd1217a19ce97867c9cf"} Nov 25 19:30:15 crc kubenswrapper[4926]: I1125 19:30:15.521625 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9803cc53-b277-4aec-af80-cab4b4638a02","Type":"ContainerStarted","Data":"e74fab444a5304765a14ed2692d71ea79fd5856303339d7e02976a6d406fc7ce"} Nov 25 19:30:16 crc kubenswrapper[4926]: I1125 19:30:16.540424 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9803cc53-b277-4aec-af80-cab4b4638a02","Type":"ContainerStarted","Data":"c54c8f342d34d38e4c481a1c4b72efbd3c0e80692c305f5cac44b29ff385e030"} Nov 25 19:30:17 crc kubenswrapper[4926]: I1125 19:30:17.555815 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9803cc53-b277-4aec-af80-cab4b4638a02","Type":"ContainerStarted","Data":"b86beebdd7a1dd41cfde73a915c00f76cb58a2e61ff590f7e7c138c51e120028"} Nov 25 19:30:18 crc kubenswrapper[4926]: I1125 19:30:18.568907 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9803cc53-b277-4aec-af80-cab4b4638a02","Type":"ContainerStarted","Data":"b57cb46f43950182219de2ffdddf043a1da1ed7da3ab929ae5460d4444bfba66"} Nov 25 19:30:18 crc kubenswrapper[4926]: I1125 19:30:18.571098 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 19:30:18 crc kubenswrapper[4926]: I1125 19:30:18.614329 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.079652189 podStartE2EDuration="6.614306877s" podCreationTimestamp="2025-11-25 19:30:12 +0000 UTC" firstStartedPulling="2025-11-25 19:30:14.102904835 +0000 UTC m=+4644.488418440" lastFinishedPulling="2025-11-25 19:30:17.637559503 +0000 UTC m=+4648.023073128" observedRunningTime="2025-11-25 19:30:18.600031205 +0000 UTC m=+4648.985544830" watchObservedRunningTime="2025-11-25 19:30:18.614306877 +0000 UTC m=+4648.999820482" Nov 25 19:30:24 crc kubenswrapper[4926]: I1125 19:30:24.472719 4926 scope.go:117] "RemoveContainer" containerID="febd321d2687a4991200dbc663900495d77a00393c46000ae9ba8e5d734cd78b" Nov 25 19:30:43 crc kubenswrapper[4926]: I1125 19:30:43.590489 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 19:31:12 crc kubenswrapper[4926]: E1125 19:31:12.503891 4926 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.212:43620->38.102.83.212:46611: write tcp 38.102.83.212:43620->38.102.83.212:46611: write: broken pipe Nov 25 19:32:33 crc kubenswrapper[4926]: I1125 19:32:33.541822 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:32:33 crc kubenswrapper[4926]: I1125 19:32:33.543247 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:33:03 crc kubenswrapper[4926]: I1125 19:33:03.541871 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:33:03 crc kubenswrapper[4926]: I1125 19:33:03.542337 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:33:33 crc kubenswrapper[4926]: I1125 19:33:33.541922 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:33:33 crc kubenswrapper[4926]: I1125 19:33:33.542602 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:33:33 crc kubenswrapper[4926]: I1125 19:33:33.542667 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:33:33 crc kubenswrapper[4926]: I1125 19:33:33.544028 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"51d8b9a7de314ce38698ed287c632a9fa8ce64b92f6f9d12c4dc62774aed87ef"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:33:33 crc kubenswrapper[4926]: I1125 19:33:33.544159 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" 
containerID="cri-o://51d8b9a7de314ce38698ed287c632a9fa8ce64b92f6f9d12c4dc62774aed87ef" gracePeriod=600 Nov 25 19:33:34 crc kubenswrapper[4926]: I1125 19:33:34.032855 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="51d8b9a7de314ce38698ed287c632a9fa8ce64b92f6f9d12c4dc62774aed87ef" exitCode=0 Nov 25 19:33:34 crc kubenswrapper[4926]: I1125 19:33:34.032935 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"51d8b9a7de314ce38698ed287c632a9fa8ce64b92f6f9d12c4dc62774aed87ef"} Nov 25 19:33:34 crc kubenswrapper[4926]: I1125 19:33:34.033323 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5"} Nov 25 19:33:34 crc kubenswrapper[4926]: I1125 19:33:34.033358 4926 scope.go:117] "RemoveContainer" containerID="7d696e15f14650653d7c12e5bfdb0658411ca6dbedecec374f178d407c54cf1e" Nov 25 19:35:33 crc kubenswrapper[4926]: I1125 19:35:33.541639 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:35:33 crc kubenswrapper[4926]: I1125 19:35:33.542342 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:36:03 crc kubenswrapper[4926]: I1125 19:36:03.540979 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:36:03 crc kubenswrapper[4926]: I1125 19:36:03.541685 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:36:33 crc kubenswrapper[4926]: I1125 19:36:33.541845 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:36:33 crc kubenswrapper[4926]: I1125 19:36:33.542538 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:36:33 crc kubenswrapper[4926]: I1125 19:36:33.542594 4926 kubelet.go:2542] "SyncLoop 
(probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:36:33 crc kubenswrapper[4926]: I1125 19:36:33.543601 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:36:33 crc kubenswrapper[4926]: I1125 19:36:33.543686 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" gracePeriod=600 Nov 25 19:36:33 crc kubenswrapper[4926]: E1125 19:36:33.736703 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:36:35 crc kubenswrapper[4926]: I1125 19:36:35.060457 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" exitCode=0 Nov 25 19:36:35 crc kubenswrapper[4926]: I1125 19:36:35.097493 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5"} Nov 25 19:36:35 crc kubenswrapper[4926]: I1125 19:36:35.097547 4926 scope.go:117] "RemoveContainer" containerID="51d8b9a7de314ce38698ed287c632a9fa8ce64b92f6f9d12c4dc62774aed87ef" Nov 25 19:36:35 crc kubenswrapper[4926]: I1125 19:36:35.098911 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:36:35 crc kubenswrapper[4926]: E1125 19:36:35.099412 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:36:50 crc kubenswrapper[4926]: I1125 19:36:50.338309 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:36:50 crc kubenswrapper[4926]: E1125 19:36:50.339533 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 
19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.650622 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2xgvv"] Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.656491 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.673980 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xgvv"] Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.701111 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a165105a-b61e-4c9a-a0fd-b01562eea725-catalog-content\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.701165 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a165105a-b61e-4c9a-a0fd-b01562eea725-utilities\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.701273 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcgdf\" (UniqueName: \"kubernetes.io/projected/a165105a-b61e-4c9a-a0fd-b01562eea725-kube-api-access-mcgdf\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.803071 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a165105a-b61e-4c9a-a0fd-b01562eea725-catalog-content\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.803123 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a165105a-b61e-4c9a-a0fd-b01562eea725-utilities\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.803198 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcgdf\" (UniqueName: \"kubernetes.io/projected/a165105a-b61e-4c9a-a0fd-b01562eea725-kube-api-access-mcgdf\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.803676 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a165105a-b61e-4c9a-a0fd-b01562eea725-utilities\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.803557 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a165105a-b61e-4c9a-a0fd-b01562eea725-catalog-content\") pod 
\"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.826184 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcgdf\" (UniqueName: \"kubernetes.io/projected/a165105a-b61e-4c9a-a0fd-b01562eea725-kube-api-access-mcgdf\") pod \"redhat-operators-2xgvv\" (UID: \"a165105a-b61e-4c9a-a0fd-b01562eea725\") " pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:03 crc kubenswrapper[4926]: I1125 19:37:03.995674 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:04 crc kubenswrapper[4926]: I1125 19:37:04.336053 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:37:04 crc kubenswrapper[4926]: E1125 19:37:04.336670 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:37:04 crc kubenswrapper[4926]: I1125 19:37:04.501704 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xgvv"] Nov 25 19:37:05 crc kubenswrapper[4926]: I1125 19:37:05.428814 4926 generic.go:334] "Generic (PLEG): container finished" podID="a165105a-b61e-4c9a-a0fd-b01562eea725" containerID="0a0f09f3b1d9b0d045736c63bdf9c843cffc262e375937215c2432242a583e92" exitCode=0 Nov 25 19:37:05 crc kubenswrapper[4926]: I1125 19:37:05.428879 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgvv" event={"ID":"a165105a-b61e-4c9a-a0fd-b01562eea725","Type":"ContainerDied","Data":"0a0f09f3b1d9b0d045736c63bdf9c843cffc262e375937215c2432242a583e92"} Nov 25 19:37:05 crc kubenswrapper[4926]: I1125 19:37:05.429074 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgvv" event={"ID":"a165105a-b61e-4c9a-a0fd-b01562eea725","Type":"ContainerStarted","Data":"3b8df04eebceba8ec038d6eba4f6c3745b0407eb223614916d981c3a29dca26c"} Nov 25 19:37:05 crc kubenswrapper[4926]: I1125 19:37:05.432975 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.467066 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sn89f"] Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.484581 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.492837 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sn89f"] Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.587755 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-catalog-content\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.588054 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw984\" (UniqueName: \"kubernetes.io/projected/99943c56-75fe-4c1a-88a3-10339a53e438-kube-api-access-tw984\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.588182 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-utilities\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.690175 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-utilities\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.690265 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-catalog-content\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.690308 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw984\" (UniqueName: \"kubernetes.io/projected/99943c56-75fe-4c1a-88a3-10339a53e438-kube-api-access-tw984\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.691160 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-utilities\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.691550 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-catalog-content\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:07 crc kubenswrapper[4926]: I1125 19:37:07.876181 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tw984\" (UniqueName: \"kubernetes.io/projected/99943c56-75fe-4c1a-88a3-10339a53e438-kube-api-access-tw984\") pod \"community-operators-sn89f\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:08 crc kubenswrapper[4926]: I1125 19:37:08.121539 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:08 crc kubenswrapper[4926]: I1125 19:37:08.627339 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sn89f"] Nov 25 19:37:08 crc kubenswrapper[4926]: W1125 19:37:08.648013 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99943c56_75fe_4c1a_88a3_10339a53e438.slice/crio-14159e3482ac726f754c32193e9402f3b3e64917cce81c699705694da4659cae WatchSource:0}: Error finding container 14159e3482ac726f754c32193e9402f3b3e64917cce81c699705694da4659cae: Status 404 returned error can't find the container with id 14159e3482ac726f754c32193e9402f3b3e64917cce81c699705694da4659cae Nov 25 19:37:09 crc kubenswrapper[4926]: I1125 19:37:09.522861 4926 generic.go:334] "Generic (PLEG): container finished" podID="99943c56-75fe-4c1a-88a3-10339a53e438" containerID="f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9" exitCode=0 Nov 25 19:37:09 crc kubenswrapper[4926]: I1125 19:37:09.522967 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerDied","Data":"f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9"} Nov 25 19:37:09 crc kubenswrapper[4926]: I1125 19:37:09.523328 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerStarted","Data":"14159e3482ac726f754c32193e9402f3b3e64917cce81c699705694da4659cae"} Nov 25 19:37:14 crc kubenswrapper[4926]: I1125 19:37:14.583687 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgvv" event={"ID":"a165105a-b61e-4c9a-a0fd-b01562eea725","Type":"ContainerStarted","Data":"6ed0335f730a15bf5a73d0397cc6fccf1c8d994a428aeaa49f4c40323afc8454"} Nov 25 19:37:14 crc kubenswrapper[4926]: I1125 19:37:14.586408 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerStarted","Data":"6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67"} Nov 25 19:37:15 crc kubenswrapper[4926]: I1125 19:37:15.611952 4926 generic.go:334] "Generic (PLEG): container finished" podID="a165105a-b61e-4c9a-a0fd-b01562eea725" containerID="6ed0335f730a15bf5a73d0397cc6fccf1c8d994a428aeaa49f4c40323afc8454" exitCode=0 Nov 25 19:37:15 crc kubenswrapper[4926]: I1125 19:37:15.612013 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgvv" event={"ID":"a165105a-b61e-4c9a-a0fd-b01562eea725","Type":"ContainerDied","Data":"6ed0335f730a15bf5a73d0397cc6fccf1c8d994a428aeaa49f4c40323afc8454"} Nov 25 19:37:16 crc kubenswrapper[4926]: I1125 19:37:16.637098 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2xgvv" 
event={"ID":"a165105a-b61e-4c9a-a0fd-b01562eea725","Type":"ContainerStarted","Data":"99dfb8125b123d6fa1c787c3acd02e992a5c075873d59a062e4282880336c257"} Nov 25 19:37:17 crc kubenswrapper[4926]: I1125 19:37:17.674668 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2xgvv" podStartSLOduration=3.9344499649999998 podStartE2EDuration="14.674645977s" podCreationTimestamp="2025-11-25 19:37:03 +0000 UTC" firstStartedPulling="2025-11-25 19:37:05.432579546 +0000 UTC m=+5055.818093191" lastFinishedPulling="2025-11-25 19:37:16.172775588 +0000 UTC m=+5066.558289203" observedRunningTime="2025-11-25 19:37:17.66487647 +0000 UTC m=+5068.050390075" watchObservedRunningTime="2025-11-25 19:37:17.674645977 +0000 UTC m=+5068.060159582" Nov 25 19:37:18 crc kubenswrapper[4926]: I1125 19:37:18.663479 4926 generic.go:334] "Generic (PLEG): container finished" podID="99943c56-75fe-4c1a-88a3-10339a53e438" containerID="6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67" exitCode=0 Nov 25 19:37:18 crc kubenswrapper[4926]: I1125 19:37:18.663559 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerDied","Data":"6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67"} Nov 25 19:37:19 crc kubenswrapper[4926]: I1125 19:37:19.329754 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:37:19 crc kubenswrapper[4926]: E1125 19:37:19.330401 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:37:20 crc kubenswrapper[4926]: I1125 19:37:20.703390 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerStarted","Data":"888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85"} Nov 25 19:37:20 crc kubenswrapper[4926]: I1125 19:37:20.729019 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sn89f" podStartSLOduration=4.177954986 podStartE2EDuration="13.728998675s" podCreationTimestamp="2025-11-25 19:37:07 +0000 UTC" firstStartedPulling="2025-11-25 19:37:09.52700889 +0000 UTC m=+5059.912522505" lastFinishedPulling="2025-11-25 19:37:19.078052599 +0000 UTC m=+5069.463566194" observedRunningTime="2025-11-25 19:37:20.726947458 +0000 UTC m=+5071.112461063" watchObservedRunningTime="2025-11-25 19:37:20.728998675 +0000 UTC m=+5071.114512280" Nov 25 19:37:23 crc kubenswrapper[4926]: I1125 19:37:23.995916 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:23 crc kubenswrapper[4926]: I1125 19:37:23.996338 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:24 crc kubenswrapper[4926]: I1125 19:37:24.048862 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:24 crc kubenswrapper[4926]: I1125 19:37:24.825014 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2xgvv" Nov 25 19:37:24 crc kubenswrapper[4926]: I1125 19:37:24.899935 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2xgvv"] Nov 25 19:37:24 crc kubenswrapper[4926]: I1125 19:37:24.960077 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 19:37:24 crc kubenswrapper[4926]: I1125 19:37:24.960643 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kkvkh" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" containerID="cri-o://cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d" gracePeriod=2 Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.564225 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.706017 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wbvt\" (UniqueName: \"kubernetes.io/projected/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-kube-api-access-5wbvt\") pod \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.706793 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-utilities\") pod \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.706920 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-catalog-content\") pod \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\" (UID: \"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d\") " Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.707415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-utilities" (OuterVolumeSpecName: "utilities") pod "56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" (UID: "56ba6a4b-3a4e-4c97-9de7-9ac7963a230d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.707925 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.716805 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-kube-api-access-5wbvt" (OuterVolumeSpecName: "kube-api-access-5wbvt") pod "56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" (UID: "56ba6a4b-3a4e-4c97-9de7-9ac7963a230d"). InnerVolumeSpecName "kube-api-access-5wbvt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.774766 4926 generic.go:334] "Generic (PLEG): container finished" podID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerID="cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d" exitCode=0 Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.774862 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kkvkh" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.774871 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerDied","Data":"cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d"} Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.774921 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kkvkh" event={"ID":"56ba6a4b-3a4e-4c97-9de7-9ac7963a230d","Type":"ContainerDied","Data":"56b654b543ecdf4815f79b89d60ef55e7dd43a936e57992e7862704c7d264043"} Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.774941 4926 scope.go:117] "RemoveContainer" containerID="cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.798636 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" (UID: "56ba6a4b-3a4e-4c97-9de7-9ac7963a230d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.802235 4926 scope.go:117] "RemoveContainer" containerID="f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.810509 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wbvt\" (UniqueName: \"kubernetes.io/projected/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-kube-api-access-5wbvt\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.810543 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.842669 4926 scope.go:117] "RemoveContainer" containerID="51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.888063 4926 scope.go:117] "RemoveContainer" containerID="cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d" Nov 25 19:37:25 crc kubenswrapper[4926]: E1125 19:37:25.888545 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d\": container with ID starting with cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d not found: ID does not exist" containerID="cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.888577 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d"} 
err="failed to get container status \"cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d\": rpc error: code = NotFound desc = could not find container \"cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d\": container with ID starting with cde125c36831c1da1dc2a1ee529b4d75ce17a0d74ecef033e57684503bf82b9d not found: ID does not exist" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.888600 4926 scope.go:117] "RemoveContainer" containerID="f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad" Nov 25 19:37:25 crc kubenswrapper[4926]: E1125 19:37:25.888947 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad\": container with ID starting with f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad not found: ID does not exist" containerID="f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.888969 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad"} err="failed to get container status \"f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad\": rpc error: code = NotFound desc = could not find container \"f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad\": container with ID starting with f2e9de1bda985f907000e1ac8946a43e06d149c8080e7539241db32b92a7b8ad not found: ID does not exist" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.888982 4926 scope.go:117] "RemoveContainer" containerID="51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6" Nov 25 19:37:25 crc kubenswrapper[4926]: E1125 19:37:25.889387 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6\": container with ID starting with 51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6 not found: ID does not exist" containerID="51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6" Nov 25 19:37:25 crc kubenswrapper[4926]: I1125 19:37:25.889436 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6"} err="failed to get container status \"51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6\": rpc error: code = NotFound desc = could not find container \"51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6\": container with ID starting with 51ad7427bd33b304eac226b515e281bcec3e92c40d54cba5dee14a374dfa8ca6 not found: ID does not exist" Nov 25 19:37:26 crc kubenswrapper[4926]: I1125 19:37:26.135246 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 19:37:26 crc kubenswrapper[4926]: I1125 19:37:26.143307 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kkvkh"] Nov 25 19:37:26 crc kubenswrapper[4926]: I1125 19:37:26.339178 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" path="/var/lib/kubelet/pods/56ba6a4b-3a4e-4c97-9de7-9ac7963a230d/volumes" Nov 25 19:37:28 crc kubenswrapper[4926]: I1125 19:37:28.123014 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:28 crc kubenswrapper[4926]: I1125 19:37:28.123301 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:28 crc kubenswrapper[4926]: I1125 19:37:28.184710 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:28 crc kubenswrapper[4926]: I1125 19:37:28.847926 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:29 crc kubenswrapper[4926]: I1125 19:37:29.499397 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sn89f"] Nov 25 19:37:30 crc kubenswrapper[4926]: I1125 19:37:30.834667 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sn89f" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="registry-server" containerID="cri-o://888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85" gracePeriod=2 Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.512758 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.643549 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-catalog-content\") pod \"99943c56-75fe-4c1a-88a3-10339a53e438\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.643663 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-utilities\") pod \"99943c56-75fe-4c1a-88a3-10339a53e438\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.643749 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw984\" (UniqueName: \"kubernetes.io/projected/99943c56-75fe-4c1a-88a3-10339a53e438-kube-api-access-tw984\") pod \"99943c56-75fe-4c1a-88a3-10339a53e438\" (UID: \"99943c56-75fe-4c1a-88a3-10339a53e438\") " Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.644223 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-utilities" (OuterVolumeSpecName: "utilities") pod "99943c56-75fe-4c1a-88a3-10339a53e438" (UID: "99943c56-75fe-4c1a-88a3-10339a53e438"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.649631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99943c56-75fe-4c1a-88a3-10339a53e438-kube-api-access-tw984" (OuterVolumeSpecName: "kube-api-access-tw984") pod "99943c56-75fe-4c1a-88a3-10339a53e438" (UID: "99943c56-75fe-4c1a-88a3-10339a53e438"). InnerVolumeSpecName "kube-api-access-tw984". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.712300 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "99943c56-75fe-4c1a-88a3-10339a53e438" (UID: "99943c56-75fe-4c1a-88a3-10339a53e438"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.746761 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.746811 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99943c56-75fe-4c1a-88a3-10339a53e438-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.746821 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw984\" (UniqueName: \"kubernetes.io/projected/99943c56-75fe-4c1a-88a3-10339a53e438-kube-api-access-tw984\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.866817 4926 generic.go:334] "Generic (PLEG): container finished" podID="99943c56-75fe-4c1a-88a3-10339a53e438" containerID="888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85" exitCode=0 Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.866894 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerDied","Data":"888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85"} Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.866942 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sn89f" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.866980 4926 scope.go:117] "RemoveContainer" containerID="888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.866961 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn89f" event={"ID":"99943c56-75fe-4c1a-88a3-10339a53e438","Type":"ContainerDied","Data":"14159e3482ac726f754c32193e9402f3b3e64917cce81c699705694da4659cae"} Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.907607 4926 scope.go:117] "RemoveContainer" containerID="6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.949841 4926 scope.go:117] "RemoveContainer" containerID="f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.957293 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sn89f"] Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.966957 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sn89f"] Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.997229 4926 scope.go:117] "RemoveContainer" containerID="888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85" Nov 25 19:37:31 crc kubenswrapper[4926]: E1125 19:37:31.997761 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85\": container with ID starting with 888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85 not found: ID does not exist" containerID="888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.997804 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85"} err="failed to get container status \"888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85\": rpc error: code = NotFound desc = could not find container \"888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85\": container with ID starting with 888565cdcaffdf4d37b16b7da14dd40fb546b363dd5fd4fa7b0f5e954326fa85 not found: ID does not exist" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.997830 4926 scope.go:117] "RemoveContainer" containerID="6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67" Nov 25 19:37:31 crc kubenswrapper[4926]: E1125 19:37:31.998271 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67\": container with ID starting with 6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67 not found: ID does not exist" containerID="6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.998290 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67"} err="failed to get container status \"6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67\": rpc error: code = NotFound desc = could not find 
container \"6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67\": container with ID starting with 6ac9398931ae3e4c6bc17d8349ce8f4db1005b219ee338dc981ea5b9420c0d67 not found: ID does not exist" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.998302 4926 scope.go:117] "RemoveContainer" containerID="f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9" Nov 25 19:37:31 crc kubenswrapper[4926]: E1125 19:37:31.998699 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9\": container with ID starting with f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9 not found: ID does not exist" containerID="f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9" Nov 25 19:37:31 crc kubenswrapper[4926]: I1125 19:37:31.998740 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9"} err="failed to get container status \"f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9\": rpc error: code = NotFound desc = could not find container \"f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9\": container with ID starting with f6a12fa4f850df1cede698123c49aae99cc56227ff1b2e83924230a3b6d7ffb9 not found: ID does not exist" Nov 25 19:37:32 crc kubenswrapper[4926]: I1125 19:37:32.329556 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:37:32 crc kubenswrapper[4926]: E1125 19:37:32.330157 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:37:32 crc kubenswrapper[4926]: I1125 19:37:32.343369 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" path="/var/lib/kubelet/pods/99943c56-75fe-4c1a-88a3-10339a53e438/volumes" Nov 25 19:37:43 crc kubenswrapper[4926]: I1125 19:37:43.328803 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:37:43 crc kubenswrapper[4926]: E1125 19:37:43.329733 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.702544 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5d5rc"] Nov 25 19:37:45 crc kubenswrapper[4926]: E1125 19:37:45.703413 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="extract-content" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.703630 4926 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="extract-content" Nov 25 19:37:45 crc kubenswrapper[4926]: E1125 19:37:45.703664 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="registry-server" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.703672 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="registry-server" Nov 25 19:37:45 crc kubenswrapper[4926]: E1125 19:37:45.703695 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.703704 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" Nov 25 19:37:45 crc kubenswrapper[4926]: E1125 19:37:45.703715 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="extract-utilities" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.703723 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="extract-utilities" Nov 25 19:37:45 crc kubenswrapper[4926]: E1125 19:37:45.703734 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="extract-utilities" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.703741 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="extract-utilities" Nov 25 19:37:45 crc kubenswrapper[4926]: E1125 19:37:45.703771 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="extract-content" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.703778 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="extract-content" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.704045 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="99943c56-75fe-4c1a-88a3-10339a53e438" containerName="registry-server" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.704067 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="56ba6a4b-3a4e-4c97-9de7-9ac7963a230d" containerName="registry-server" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.706029 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.712992 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5d5rc"] Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.800362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dtjj\" (UniqueName: \"kubernetes.io/projected/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-kube-api-access-5dtjj\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.800456 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-catalog-content\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.800564 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-utilities\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.903275 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dtjj\" (UniqueName: \"kubernetes.io/projected/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-kube-api-access-5dtjj\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.903366 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-catalog-content\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.903545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-utilities\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.903936 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-catalog-content\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.904146 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-utilities\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:45 crc kubenswrapper[4926]: I1125 19:37:45.925428 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5dtjj\" (UniqueName: \"kubernetes.io/projected/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-kube-api-access-5dtjj\") pod \"certified-operators-5d5rc\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:46 crc kubenswrapper[4926]: I1125 19:37:46.038196 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:46 crc kubenswrapper[4926]: I1125 19:37:46.559644 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5d5rc"] Nov 25 19:37:47 crc kubenswrapper[4926]: I1125 19:37:47.067663 4926 generic.go:334] "Generic (PLEG): container finished" podID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerID="22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f" exitCode=0 Nov 25 19:37:47 crc kubenswrapper[4926]: I1125 19:37:47.067771 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerDied","Data":"22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f"} Nov 25 19:37:47 crc kubenswrapper[4926]: I1125 19:37:47.067955 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerStarted","Data":"77251409760f28fac363db0cf7c3f75b6788d5e4bd23c873d6ae895ed1a51a51"} Nov 25 19:37:48 crc kubenswrapper[4926]: I1125 19:37:48.097490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerStarted","Data":"09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc"} Nov 25 19:37:49 crc kubenswrapper[4926]: I1125 19:37:49.111535 4926 generic.go:334] "Generic (PLEG): container finished" podID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerID="09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc" exitCode=0 Nov 25 19:37:49 crc kubenswrapper[4926]: I1125 19:37:49.111595 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerDied","Data":"09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc"} Nov 25 19:37:50 crc kubenswrapper[4926]: I1125 19:37:50.131401 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerStarted","Data":"ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559"} Nov 25 19:37:50 crc kubenswrapper[4926]: I1125 19:37:50.156600 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5d5rc" podStartSLOduration=2.662944032 podStartE2EDuration="5.15658181s" podCreationTimestamp="2025-11-25 19:37:45 +0000 UTC" firstStartedPulling="2025-11-25 19:37:47.072640856 +0000 UTC m=+5097.458154461" lastFinishedPulling="2025-11-25 19:37:49.566278624 +0000 UTC m=+5099.951792239" observedRunningTime="2025-11-25 19:37:50.152272112 +0000 UTC m=+5100.537785727" watchObservedRunningTime="2025-11-25 19:37:50.15658181 +0000 UTC m=+5100.542095425" Nov 25 19:37:56 crc kubenswrapper[4926]: I1125 19:37:56.038877 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:56 crc kubenswrapper[4926]: I1125 19:37:56.040666 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:56 crc kubenswrapper[4926]: I1125 19:37:56.123986 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:56 crc kubenswrapper[4926]: I1125 19:37:56.265277 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:56 crc kubenswrapper[4926]: I1125 19:37:56.368432 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5d5rc"] Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.229215 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5d5rc" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="registry-server" containerID="cri-o://ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559" gracePeriod=2 Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.330558 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:37:58 crc kubenswrapper[4926]: E1125 19:37:58.331159 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.823348 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.943319 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-utilities\") pod \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.943628 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dtjj\" (UniqueName: \"kubernetes.io/projected/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-kube-api-access-5dtjj\") pod \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.943830 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-catalog-content\") pod \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\" (UID: \"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb\") " Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.944173 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-utilities" (OuterVolumeSpecName: "utilities") pod "bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" (UID: "bed570ec-517d-49b6-a4d5-d7ba38a5bfbb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.944609 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:58 crc kubenswrapper[4926]: I1125 19:37:58.967910 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-kube-api-access-5dtjj" (OuterVolumeSpecName: "kube-api-access-5dtjj") pod "bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" (UID: "bed570ec-517d-49b6-a4d5-d7ba38a5bfbb"). InnerVolumeSpecName "kube-api-access-5dtjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.047304 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dtjj\" (UniqueName: \"kubernetes.io/projected/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-kube-api-access-5dtjj\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.158860 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" (UID: "bed570ec-517d-49b6-a4d5-d7ba38a5bfbb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.241809 4926 generic.go:334] "Generic (PLEG): container finished" podID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerID="ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559" exitCode=0 Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.241867 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerDied","Data":"ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559"} Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.241941 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5d5rc" event={"ID":"bed570ec-517d-49b6-a4d5-d7ba38a5bfbb","Type":"ContainerDied","Data":"77251409760f28fac363db0cf7c3f75b6788d5e4bd23c873d6ae895ed1a51a51"} Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.241933 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5d5rc" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.241959 4926 scope.go:117] "RemoveContainer" containerID="ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.251695 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.273195 4926 scope.go:117] "RemoveContainer" containerID="09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.283870 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5d5rc"] Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.294929 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5d5rc"] Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.308063 4926 scope.go:117] "RemoveContainer" containerID="22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.359239 4926 scope.go:117] "RemoveContainer" containerID="ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559" Nov 25 19:37:59 crc kubenswrapper[4926]: E1125 19:37:59.359811 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559\": container with ID starting with ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559 not found: ID does not exist" containerID="ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.359846 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559"} err="failed to get container status \"ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559\": rpc error: code = NotFound desc = could not find container \"ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559\": container with ID starting with ff4f4d4cbe1a4450108be3873c987e128eef0f8f4d5c73078f2fed7b2168f559 not found: ID does not exist" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.359872 4926 scope.go:117] "RemoveContainer" containerID="09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc" Nov 25 19:37:59 crc kubenswrapper[4926]: E1125 19:37:59.360161 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc\": container with ID starting with 09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc not found: ID does not exist" containerID="09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.360186 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc"} err="failed to get container status \"09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc\": rpc error: code = NotFound desc = could not find container 
\"09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc\": container with ID starting with 09ce71f61e2272b243bf3f03a73d632955539345cb5af857093dfd1ee99e0fcc not found: ID does not exist" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.360205 4926 scope.go:117] "RemoveContainer" containerID="22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f" Nov 25 19:37:59 crc kubenswrapper[4926]: E1125 19:37:59.360423 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f\": container with ID starting with 22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f not found: ID does not exist" containerID="22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f" Nov 25 19:37:59 crc kubenswrapper[4926]: I1125 19:37:59.360449 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f"} err="failed to get container status \"22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f\": rpc error: code = NotFound desc = could not find container \"22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f\": container with ID starting with 22eb37fa8fe7a1b511a561dc472ca69282b2e7fb92ca7204c17e48cfbfcbfe2f not found: ID does not exist" Nov 25 19:38:00 crc kubenswrapper[4926]: I1125 19:38:00.346012 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" path="/var/lib/kubelet/pods/bed570ec-517d-49b6-a4d5-d7ba38a5bfbb/volumes" Nov 25 19:38:10 crc kubenswrapper[4926]: I1125 19:38:10.342478 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:38:10 crc kubenswrapper[4926]: E1125 19:38:10.343499 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:38:25 crc kubenswrapper[4926]: I1125 19:38:25.329671 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:38:25 crc kubenswrapper[4926]: E1125 19:38:25.330757 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:38:40 crc kubenswrapper[4926]: I1125 19:38:40.330301 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:38:40 crc kubenswrapper[4926]: E1125 19:38:40.331161 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:38:52 crc kubenswrapper[4926]: I1125 19:38:52.330134 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:38:52 crc kubenswrapper[4926]: E1125 19:38:52.331497 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:39:07 crc kubenswrapper[4926]: I1125 19:39:07.329632 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:39:07 crc kubenswrapper[4926]: E1125 19:39:07.330443 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:39:18 crc kubenswrapper[4926]: I1125 19:39:18.329312 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:39:18 crc kubenswrapper[4926]: E1125 19:39:18.330056 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:39:32 crc kubenswrapper[4926]: I1125 19:39:32.329318 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:39:32 crc kubenswrapper[4926]: E1125 19:39:32.330588 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:39:45 crc kubenswrapper[4926]: I1125 19:39:45.329703 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:39:45 crc kubenswrapper[4926]: E1125 19:39:45.330948 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" 
podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.126598 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5st"] Nov 25 19:39:51 crc kubenswrapper[4926]: E1125 19:39:51.128036 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="registry-server" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.128062 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="registry-server" Nov 25 19:39:51 crc kubenswrapper[4926]: E1125 19:39:51.128114 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="extract-utilities" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.128127 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="extract-utilities" Nov 25 19:39:51 crc kubenswrapper[4926]: E1125 19:39:51.128163 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="extract-content" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.128177 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="extract-content" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.128572 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed570ec-517d-49b6-a4d5-d7ba38a5bfbb" containerName="registry-server" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.134052 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.152487 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5st"] Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.208621 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njf78\" (UniqueName: \"kubernetes.io/projected/17759502-5dbb-4abd-a2b6-b564253544ec-kube-api-access-njf78\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.208959 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-catalog-content\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.209080 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-utilities\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.310694 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njf78\" (UniqueName: \"kubernetes.io/projected/17759502-5dbb-4abd-a2b6-b564253544ec-kube-api-access-njf78\") pod \"redhat-marketplace-4w5st\" (UID: 
\"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.310775 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-catalog-content\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.310802 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-utilities\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.311597 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-catalog-content\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.311659 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-utilities\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.331190 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njf78\" (UniqueName: \"kubernetes.io/projected/17759502-5dbb-4abd-a2b6-b564253544ec-kube-api-access-njf78\") pod \"redhat-marketplace-4w5st\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.505223 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:39:51 crc kubenswrapper[4926]: I1125 19:39:51.972616 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5st"] Nov 25 19:39:52 crc kubenswrapper[4926]: I1125 19:39:52.775251 4926 generic.go:334] "Generic (PLEG): container finished" podID="17759502-5dbb-4abd-a2b6-b564253544ec" containerID="55e2aed82f3191cb8e708f166ea22e9f54c70074dd9e3eca2270a48bcac571e7" exitCode=0 Nov 25 19:39:52 crc kubenswrapper[4926]: I1125 19:39:52.775339 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerDied","Data":"55e2aed82f3191cb8e708f166ea22e9f54c70074dd9e3eca2270a48bcac571e7"} Nov 25 19:39:52 crc kubenswrapper[4926]: I1125 19:39:52.775743 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerStarted","Data":"bd72f1434c3837aec85e9dba76f2d2dbafcb117cecc7ff0555ecef987b457485"} Nov 25 19:39:53 crc kubenswrapper[4926]: I1125 19:39:53.786297 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerStarted","Data":"e8958e35c180d37ee6938478ded6c4ad521cbcb453506d347ee7eacbe42e0b5e"} Nov 25 19:39:54 crc kubenswrapper[4926]: I1125 19:39:54.800808 4926 generic.go:334] "Generic (PLEG): container finished" podID="17759502-5dbb-4abd-a2b6-b564253544ec" containerID="e8958e35c180d37ee6938478ded6c4ad521cbcb453506d347ee7eacbe42e0b5e" exitCode=0 Nov 25 19:39:54 crc kubenswrapper[4926]: I1125 19:39:54.800881 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerDied","Data":"e8958e35c180d37ee6938478ded6c4ad521cbcb453506d347ee7eacbe42e0b5e"} Nov 25 19:39:55 crc kubenswrapper[4926]: I1125 19:39:55.811349 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerStarted","Data":"f052e0b830a432bef174c73c2c067501e82ee4faa34a7d69d896d6aeb641dddc"} Nov 25 19:40:00 crc kubenswrapper[4926]: I1125 19:40:00.334932 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:40:00 crc kubenswrapper[4926]: E1125 19:40:00.336439 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:40:01 crc kubenswrapper[4926]: I1125 19:40:01.506205 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:40:01 crc kubenswrapper[4926]: I1125 19:40:01.506292 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:40:02 crc kubenswrapper[4926]: I1125 19:40:02.041144 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:40:02 crc kubenswrapper[4926]: I1125 19:40:02.074324 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4w5st" podStartSLOduration=8.521823606 podStartE2EDuration="11.074283758s" podCreationTimestamp="2025-11-25 19:39:51 +0000 UTC" firstStartedPulling="2025-11-25 19:39:52.778202414 +0000 UTC m=+5223.163716029" lastFinishedPulling="2025-11-25 19:39:55.330662556 +0000 UTC m=+5225.716176181" observedRunningTime="2025-11-25 19:39:55.836554344 +0000 UTC m=+5226.222067969" watchObservedRunningTime="2025-11-25 19:40:02.074283758 +0000 UTC m=+5232.459797413" Nov 25 19:40:02 crc kubenswrapper[4926]: I1125 19:40:02.113518 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:40:02 crc kubenswrapper[4926]: I1125 19:40:02.284214 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5st"] Nov 25 19:40:03 crc kubenswrapper[4926]: I1125 19:40:03.934505 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4w5st" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="registry-server" containerID="cri-o://f052e0b830a432bef174c73c2c067501e82ee4faa34a7d69d896d6aeb641dddc" gracePeriod=2 Nov 25 19:40:04 crc kubenswrapper[4926]: I1125 19:40:04.948968 4926 generic.go:334] "Generic (PLEG): container finished" podID="17759502-5dbb-4abd-a2b6-b564253544ec" containerID="f052e0b830a432bef174c73c2c067501e82ee4faa34a7d69d896d6aeb641dddc" exitCode=0 Nov 25 19:40:04 crc kubenswrapper[4926]: I1125 19:40:04.949064 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerDied","Data":"f052e0b830a432bef174c73c2c067501e82ee4faa34a7d69d896d6aeb641dddc"} Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.154262 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.218071 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njf78\" (UniqueName: \"kubernetes.io/projected/17759502-5dbb-4abd-a2b6-b564253544ec-kube-api-access-njf78\") pod \"17759502-5dbb-4abd-a2b6-b564253544ec\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.218124 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-catalog-content\") pod \"17759502-5dbb-4abd-a2b6-b564253544ec\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.218172 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-utilities\") pod \"17759502-5dbb-4abd-a2b6-b564253544ec\" (UID: \"17759502-5dbb-4abd-a2b6-b564253544ec\") " Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.219502 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-utilities" (OuterVolumeSpecName: "utilities") pod "17759502-5dbb-4abd-a2b6-b564253544ec" (UID: "17759502-5dbb-4abd-a2b6-b564253544ec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.224162 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17759502-5dbb-4abd-a2b6-b564253544ec-kube-api-access-njf78" (OuterVolumeSpecName: "kube-api-access-njf78") pod "17759502-5dbb-4abd-a2b6-b564253544ec" (UID: "17759502-5dbb-4abd-a2b6-b564253544ec"). InnerVolumeSpecName "kube-api-access-njf78". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.250040 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "17759502-5dbb-4abd-a2b6-b564253544ec" (UID: "17759502-5dbb-4abd-a2b6-b564253544ec"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.320960 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njf78\" (UniqueName: \"kubernetes.io/projected/17759502-5dbb-4abd-a2b6-b564253544ec-kube-api-access-njf78\") on node \"crc\" DevicePath \"\"" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.320988 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.320997 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17759502-5dbb-4abd-a2b6-b564253544ec-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.967632 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4w5st" event={"ID":"17759502-5dbb-4abd-a2b6-b564253544ec","Type":"ContainerDied","Data":"bd72f1434c3837aec85e9dba76f2d2dbafcb117cecc7ff0555ecef987b457485"} Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.967695 4926 scope.go:117] "RemoveContainer" containerID="f052e0b830a432bef174c73c2c067501e82ee4faa34a7d69d896d6aeb641dddc" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.967752 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4w5st" Nov 25 19:40:05 crc kubenswrapper[4926]: I1125 19:40:05.997889 4926 scope.go:117] "RemoveContainer" containerID="e8958e35c180d37ee6938478ded6c4ad521cbcb453506d347ee7eacbe42e0b5e" Nov 25 19:40:06 crc kubenswrapper[4926]: I1125 19:40:06.038168 4926 scope.go:117] "RemoveContainer" containerID="55e2aed82f3191cb8e708f166ea22e9f54c70074dd9e3eca2270a48bcac571e7" Nov 25 19:40:06 crc kubenswrapper[4926]: I1125 19:40:06.059674 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5st"] Nov 25 19:40:06 crc kubenswrapper[4926]: I1125 19:40:06.077350 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4w5st"] Nov 25 19:40:06 crc kubenswrapper[4926]: I1125 19:40:06.344173 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" path="/var/lib/kubelet/pods/17759502-5dbb-4abd-a2b6-b564253544ec/volumes" Nov 25 19:40:14 crc kubenswrapper[4926]: I1125 19:40:14.329719 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:40:14 crc kubenswrapper[4926]: E1125 19:40:14.330685 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:40:28 crc kubenswrapper[4926]: I1125 19:40:28.329008 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:40:28 crc kubenswrapper[4926]: E1125 19:40:28.330034 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:40:42 crc kubenswrapper[4926]: I1125 19:40:42.329900 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:40:42 crc kubenswrapper[4926]: E1125 19:40:42.330716 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:40:54 crc kubenswrapper[4926]: I1125 19:40:54.329947 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:40:54 crc kubenswrapper[4926]: E1125 19:40:54.331806 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:41:05 crc kubenswrapper[4926]: I1125 19:41:05.330291 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:41:05 crc kubenswrapper[4926]: E1125 19:41:05.331242 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:41:16 crc kubenswrapper[4926]: I1125 19:41:16.330257 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:41:16 crc kubenswrapper[4926]: E1125 19:41:16.331234 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:41:28 crc kubenswrapper[4926]: I1125 19:41:28.330038 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:41:28 crc kubenswrapper[4926]: E1125 19:41:28.331402 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:41:39 crc kubenswrapper[4926]: I1125 19:41:39.329929 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:41:40 crc kubenswrapper[4926]: I1125 19:41:40.110201 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"7c0e2c6767951b460a0445ced09e2c9755228b0015d59cf547de2ebc3fa1368e"} Nov 25 19:44:03 crc kubenswrapper[4926]: I1125 19:44:03.541260 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:44:03 crc kubenswrapper[4926]: I1125 19:44:03.541976 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:44:33 crc kubenswrapper[4926]: I1125 19:44:33.541856 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:44:33 crc kubenswrapper[4926]: I1125 19:44:33.542529 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.178755 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl"] Nov 25 19:45:00 crc kubenswrapper[4926]: E1125 19:45:00.180057 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="registry-server" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.180083 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="registry-server" Nov 25 19:45:00 crc kubenswrapper[4926]: E1125 19:45:00.180100 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="extract-content" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.180111 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="extract-content" Nov 25 19:45:00 crc kubenswrapper[4926]: E1125 19:45:00.180161 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="extract-utilities" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.180175 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="extract-utilities" Nov 25 19:45:00 crc kubenswrapper[4926]: 
I1125 19:45:00.180516 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="17759502-5dbb-4abd-a2b6-b564253544ec" containerName="registry-server" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.181337 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.183960 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.185462 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.190391 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl"] Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.335193 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-secret-volume\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.335461 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxxr5\" (UniqueName: \"kubernetes.io/projected/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-kube-api-access-kxxr5\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.335514 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-config-volume\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.437550 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-secret-volume\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.437714 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxxr5\" (UniqueName: \"kubernetes.io/projected/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-kube-api-access-kxxr5\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.437749 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-config-volume\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.439323 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-config-volume\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.456010 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-secret-volume\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.464812 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxxr5\" (UniqueName: \"kubernetes.io/projected/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-kube-api-access-kxxr5\") pod \"collect-profiles-29401665-dt4xl\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:00 crc kubenswrapper[4926]: I1125 19:45:00.535742 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:01 crc kubenswrapper[4926]: I1125 19:45:01.010821 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl"] Nov 25 19:45:01 crc kubenswrapper[4926]: I1125 19:45:01.633510 4926 generic.go:334] "Generic (PLEG): container finished" podID="8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" containerID="b7f0713d68ee1a9ef45864673204e67ddc59ee923312a5704b4541c092f93d46" exitCode=0 Nov 25 19:45:01 crc kubenswrapper[4926]: I1125 19:45:01.633573 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" event={"ID":"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f","Type":"ContainerDied","Data":"b7f0713d68ee1a9ef45864673204e67ddc59ee923312a5704b4541c092f93d46"} Nov 25 19:45:01 crc kubenswrapper[4926]: I1125 19:45:01.633791 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" event={"ID":"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f","Type":"ContainerStarted","Data":"baa00336ecfc88027c4cba4a0df1beabe228fb6e63b35e2ef6646a8de1cb474a"} Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.109715 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.206864 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-config-volume\") pod \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.207054 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-secret-volume\") pod \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.207187 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxxr5\" (UniqueName: \"kubernetes.io/projected/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-kube-api-access-kxxr5\") pod \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\" (UID: \"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f\") " Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.208016 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-config-volume" (OuterVolumeSpecName: "config-volume") pod "8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" (UID: "8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.217476 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" (UID: "8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.223661 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-kube-api-access-kxxr5" (OuterVolumeSpecName: "kube-api-access-kxxr5") pod "8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" (UID: "8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f"). InnerVolumeSpecName "kube-api-access-kxxr5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.310196 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.310238 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxxr5\" (UniqueName: \"kubernetes.io/projected/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-kube-api-access-kxxr5\") on node \"crc\" DevicePath \"\"" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.310256 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.541612 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.541689 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.541740 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.543172 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7c0e2c6767951b460a0445ced09e2c9755228b0015d59cf547de2ebc3fa1368e"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.543253 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://7c0e2c6767951b460a0445ced09e2c9755228b0015d59cf547de2ebc3fa1368e" gracePeriod=600 Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.667230 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" event={"ID":"8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f","Type":"ContainerDied","Data":"baa00336ecfc88027c4cba4a0df1beabe228fb6e63b35e2ef6646a8de1cb474a"} Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.667287 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="baa00336ecfc88027c4cba4a0df1beabe228fb6e63b35e2ef6646a8de1cb474a" Nov 25 19:45:03 crc kubenswrapper[4926]: I1125 19:45:03.667356 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401665-dt4xl" Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.194937 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"] Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.206060 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401620-pvlml"] Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.342527 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67528577-8a4c-4df6-b801-8a49064d4af6" path="/var/lib/kubelet/pods/67528577-8a4c-4df6-b801-8a49064d4af6/volumes" Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.683110 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="7c0e2c6767951b460a0445ced09e2c9755228b0015d59cf547de2ebc3fa1368e" exitCode=0 Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.683734 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"7c0e2c6767951b460a0445ced09e2c9755228b0015d59cf547de2ebc3fa1368e"} Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.688936 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934"} Nov 25 19:45:04 crc kubenswrapper[4926]: I1125 19:45:04.688998 4926 scope.go:117] "RemoveContainer" containerID="562d8e8baea7842c6fe8261592ca314e0b1bc8c7cc9b59c6e49eee121a07adb5" Nov 25 19:45:25 crc kubenswrapper[4926]: I1125 19:45:25.348065 4926 scope.go:117] "RemoveContainer" containerID="0cf71e2e16575aaae2c8272d660b0d84dbb83e1a68c56feb53c72105a4ed938d" Nov 25 19:47:03 crc kubenswrapper[4926]: I1125 19:47:03.541925 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:47:03 crc kubenswrapper[4926]: I1125 19:47:03.542709 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:47:33 crc kubenswrapper[4926]: I1125 19:47:33.541943 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:47:33 crc kubenswrapper[4926]: I1125 19:47:33.542578 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:47:48 crc 
kubenswrapper[4926]: I1125 19:47:48.115157 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kr4fv"] Nov 25 19:47:48 crc kubenswrapper[4926]: E1125 19:47:48.116780 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" containerName="collect-profiles" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.116810 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" containerName="collect-profiles" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.117272 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fdcfc9d-3a0b-4bab-a197-ce5de2d4182f" containerName="collect-profiles" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.122101 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.137975 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kr4fv"] Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.225220 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-catalog-content\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.225280 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-utilities\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.225317 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glv75\" (UniqueName: \"kubernetes.io/projected/05b9d025-b392-4791-8c7d-5fe646cf0db3-kube-api-access-glv75\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.327278 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-catalog-content\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.327571 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-utilities\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.327680 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glv75\" (UniqueName: \"kubernetes.io/projected/05b9d025-b392-4791-8c7d-5fe646cf0db3-kube-api-access-glv75\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " 
pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.327840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-catalog-content\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.328080 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-utilities\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.351184 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glv75\" (UniqueName: \"kubernetes.io/projected/05b9d025-b392-4791-8c7d-5fe646cf0db3-kube-api-access-glv75\") pod \"community-operators-kr4fv\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:48 crc kubenswrapper[4926]: I1125 19:47:48.470323 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:49 crc kubenswrapper[4926]: I1125 19:47:49.061033 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kr4fv"] Nov 25 19:47:49 crc kubenswrapper[4926]: I1125 19:47:49.796757 4926 generic.go:334] "Generic (PLEG): container finished" podID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerID="4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b" exitCode=0 Nov 25 19:47:49 crc kubenswrapper[4926]: I1125 19:47:49.796854 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerDied","Data":"4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b"} Nov 25 19:47:49 crc kubenswrapper[4926]: I1125 19:47:49.797113 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerStarted","Data":"6fa49428d614b75f17df4940a8d13957ca3689004c991f2c820814e665addc57"} Nov 25 19:47:49 crc kubenswrapper[4926]: I1125 19:47:49.799133 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 19:47:50 crc kubenswrapper[4926]: I1125 19:47:50.813515 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerStarted","Data":"76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34"} Nov 25 19:47:51 crc kubenswrapper[4926]: I1125 19:47:51.826076 4926 generic.go:334] "Generic (PLEG): container finished" podID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerID="76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34" exitCode=0 Nov 25 19:47:51 crc kubenswrapper[4926]: I1125 19:47:51.826161 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerDied","Data":"76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34"} 
Nov 25 19:47:52 crc kubenswrapper[4926]: I1125 19:47:52.838321 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerStarted","Data":"2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2"} Nov 25 19:47:52 crc kubenswrapper[4926]: I1125 19:47:52.857334 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kr4fv" podStartSLOduration=2.424017525 podStartE2EDuration="4.857316767s" podCreationTimestamp="2025-11-25 19:47:48 +0000 UTC" firstStartedPulling="2025-11-25 19:47:49.798786855 +0000 UTC m=+5700.184300480" lastFinishedPulling="2025-11-25 19:47:52.232086117 +0000 UTC m=+5702.617599722" observedRunningTime="2025-11-25 19:47:52.852419465 +0000 UTC m=+5703.237933080" watchObservedRunningTime="2025-11-25 19:47:52.857316767 +0000 UTC m=+5703.242830372" Nov 25 19:47:58 crc kubenswrapper[4926]: I1125 19:47:58.471413 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:58 crc kubenswrapper[4926]: I1125 19:47:58.471952 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:58 crc kubenswrapper[4926]: I1125 19:47:58.556058 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:58 crc kubenswrapper[4926]: I1125 19:47:58.983530 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:47:59 crc kubenswrapper[4926]: I1125 19:47:59.063904 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kr4fv"] Nov 25 19:48:00 crc kubenswrapper[4926]: I1125 19:48:00.927209 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kr4fv" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="registry-server" containerID="cri-o://2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2" gracePeriod=2 Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.493176 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.578765 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-catalog-content\") pod \"05b9d025-b392-4791-8c7d-5fe646cf0db3\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.578976 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glv75\" (UniqueName: \"kubernetes.io/projected/05b9d025-b392-4791-8c7d-5fe646cf0db3-kube-api-access-glv75\") pod \"05b9d025-b392-4791-8c7d-5fe646cf0db3\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.579100 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-utilities\") pod \"05b9d025-b392-4791-8c7d-5fe646cf0db3\" (UID: \"05b9d025-b392-4791-8c7d-5fe646cf0db3\") " Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.580351 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-utilities" (OuterVolumeSpecName: "utilities") pod "05b9d025-b392-4791-8c7d-5fe646cf0db3" (UID: "05b9d025-b392-4791-8c7d-5fe646cf0db3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.600144 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05b9d025-b392-4791-8c7d-5fe646cf0db3-kube-api-access-glv75" (OuterVolumeSpecName: "kube-api-access-glv75") pod "05b9d025-b392-4791-8c7d-5fe646cf0db3" (UID: "05b9d025-b392-4791-8c7d-5fe646cf0db3"). InnerVolumeSpecName "kube-api-access-glv75". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.668772 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "05b9d025-b392-4791-8c7d-5fe646cf0db3" (UID: "05b9d025-b392-4791-8c7d-5fe646cf0db3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.685899 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.686214 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glv75\" (UniqueName: \"kubernetes.io/projected/05b9d025-b392-4791-8c7d-5fe646cf0db3-kube-api-access-glv75\") on node \"crc\" DevicePath \"\"" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.686224 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05b9d025-b392-4791-8c7d-5fe646cf0db3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.940163 4926 generic.go:334] "Generic (PLEG): container finished" podID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerID="2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2" exitCode=0 Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.940212 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kr4fv" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.940211 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerDied","Data":"2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2"} Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.940338 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kr4fv" event={"ID":"05b9d025-b392-4791-8c7d-5fe646cf0db3","Type":"ContainerDied","Data":"6fa49428d614b75f17df4940a8d13957ca3689004c991f2c820814e665addc57"} Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.940359 4926 scope.go:117] "RemoveContainer" containerID="2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2" Nov 25 19:48:01 crc kubenswrapper[4926]: I1125 19:48:01.966958 4926 scope.go:117] "RemoveContainer" containerID="76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.044144 4926 scope.go:117] "RemoveContainer" containerID="4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.095350 4926 scope.go:117] "RemoveContainer" containerID="2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.095676 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kr4fv"] Nov 25 19:48:02 crc kubenswrapper[4926]: E1125 19:48:02.096940 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2\": container with ID starting with 2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2 not found: ID does not exist" containerID="2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.096976 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2"} 
err="failed to get container status \"2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2\": rpc error: code = NotFound desc = could not find container \"2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2\": container with ID starting with 2f35481576985a788b39e345219001132ada803f48eed09181660756f420c9c2 not found: ID does not exist" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.097002 4926 scope.go:117] "RemoveContainer" containerID="76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34" Nov 25 19:48:02 crc kubenswrapper[4926]: E1125 19:48:02.097266 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34\": container with ID starting with 76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34 not found: ID does not exist" containerID="76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.097301 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34"} err="failed to get container status \"76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34\": rpc error: code = NotFound desc = could not find container \"76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34\": container with ID starting with 76397a11ede2fe8d365ca0f42de735d492a82efd2aa579a5bfb508cdebe54a34 not found: ID does not exist" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.097318 4926 scope.go:117] "RemoveContainer" containerID="4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b" Nov 25 19:48:02 crc kubenswrapper[4926]: E1125 19:48:02.097716 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b\": container with ID starting with 4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b not found: ID does not exist" containerID="4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.097744 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b"} err="failed to get container status \"4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b\": rpc error: code = NotFound desc = could not find container \"4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b\": container with ID starting with 4b7b120618f6e5bb016bb503dd625ed42cbd3bfcc496e2b18118efb50efdec7b not found: ID does not exist" Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.106931 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kr4fv"] Nov 25 19:48:02 crc kubenswrapper[4926]: I1125 19:48:02.343302 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" path="/var/lib/kubelet/pods/05b9d025-b392-4791-8c7d-5fe646cf0db3/volumes" Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.542005 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.542490 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.542559 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.543634 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.543721 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" gracePeriod=600 Nov 25 19:48:03 crc kubenswrapper[4926]: E1125 19:48:03.693094 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.975280 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" exitCode=0 Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.975340 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934"} Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.975402 4926 scope.go:117] "RemoveContainer" containerID="7c0e2c6767951b460a0445ced09e2c9755228b0015d59cf547de2ebc3fa1368e" Nov 25 19:48:03 crc kubenswrapper[4926]: I1125 19:48:03.976220 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:48:03 crc kubenswrapper[4926]: E1125 19:48:03.976575 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:48:15 crc kubenswrapper[4926]: I1125 19:48:15.329974 4926 scope.go:117] "RemoveContainer" 
containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:48:15 crc kubenswrapper[4926]: E1125 19:48:15.331647 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:48:27 crc kubenswrapper[4926]: I1125 19:48:27.330582 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:48:27 crc kubenswrapper[4926]: E1125 19:48:27.332004 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:48:42 crc kubenswrapper[4926]: I1125 19:48:42.330166 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:48:42 crc kubenswrapper[4926]: E1125 19:48:42.331302 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:48:53 crc kubenswrapper[4926]: I1125 19:48:53.329297 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:48:53 crc kubenswrapper[4926]: E1125 19:48:53.330069 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:49:08 crc kubenswrapper[4926]: I1125 19:49:08.329125 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:49:08 crc kubenswrapper[4926]: E1125 19:49:08.330177 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:49:20 crc kubenswrapper[4926]: I1125 19:49:20.343047 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:49:20 crc kubenswrapper[4926]: E1125 19:49:20.344286 4926 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:49:33 crc kubenswrapper[4926]: I1125 19:49:33.329240 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:49:33 crc kubenswrapper[4926]: E1125 19:49:33.330321 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:49:45 crc kubenswrapper[4926]: I1125 19:49:45.329776 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:49:45 crc kubenswrapper[4926]: E1125 19:49:45.330730 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.420002 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pvg8j"] Nov 25 19:49:55 crc kubenswrapper[4926]: E1125 19:49:55.421068 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="registry-server" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.421081 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="registry-server" Nov 25 19:49:55 crc kubenswrapper[4926]: E1125 19:49:55.421100 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="extract-utilities" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.421116 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="extract-utilities" Nov 25 19:49:55 crc kubenswrapper[4926]: E1125 19:49:55.421155 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="extract-content" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.421161 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="extract-content" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.421401 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="05b9d025-b392-4791-8c7d-5fe646cf0db3" containerName="registry-server" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.422822 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.450119 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pvg8j"] Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.566844 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-utilities\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.566905 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cdfn\" (UniqueName: \"kubernetes.io/projected/52f3cd70-cda4-433a-b4ad-b78d9f699462-kube-api-access-7cdfn\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.567477 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-catalog-content\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.669682 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-catalog-content\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.669812 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-utilities\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.669857 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cdfn\" (UniqueName: \"kubernetes.io/projected/52f3cd70-cda4-433a-b4ad-b78d9f699462-kube-api-access-7cdfn\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.670140 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-catalog-content\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.670206 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-utilities\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.695253 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7cdfn\" (UniqueName: \"kubernetes.io/projected/52f3cd70-cda4-433a-b4ad-b78d9f699462-kube-api-access-7cdfn\") pod \"redhat-marketplace-pvg8j\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:55 crc kubenswrapper[4926]: I1125 19:49:55.748441 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:49:56 crc kubenswrapper[4926]: I1125 19:49:56.802396 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pvg8j"] Nov 25 19:49:56 crc kubenswrapper[4926]: W1125 19:49:56.818950 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52f3cd70_cda4_433a_b4ad_b78d9f699462.slice/crio-6c96e9835836165cdd650b2bba145a7d117638f3fd8701ba0a4d91a08b14cebe WatchSource:0}: Error finding container 6c96e9835836165cdd650b2bba145a7d117638f3fd8701ba0a4d91a08b14cebe: Status 404 returned error can't find the container with id 6c96e9835836165cdd650b2bba145a7d117638f3fd8701ba0a4d91a08b14cebe Nov 25 19:49:57 crc kubenswrapper[4926]: I1125 19:49:57.330719 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:49:57 crc kubenswrapper[4926]: E1125 19:49:57.331414 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:49:57 crc kubenswrapper[4926]: I1125 19:49:57.511064 4926 generic.go:334] "Generic (PLEG): container finished" podID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerID="63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1" exitCode=0 Nov 25 19:49:57 crc kubenswrapper[4926]: I1125 19:49:57.511126 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pvg8j" event={"ID":"52f3cd70-cda4-433a-b4ad-b78d9f699462","Type":"ContainerDied","Data":"63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1"} Nov 25 19:49:57 crc kubenswrapper[4926]: I1125 19:49:57.511206 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pvg8j" event={"ID":"52f3cd70-cda4-433a-b4ad-b78d9f699462","Type":"ContainerStarted","Data":"6c96e9835836165cdd650b2bba145a7d117638f3fd8701ba0a4d91a08b14cebe"} Nov 25 19:49:59 crc kubenswrapper[4926]: I1125 19:49:59.543845 4926 generic.go:334] "Generic (PLEG): container finished" podID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerID="06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574" exitCode=0 Nov 25 19:49:59 crc kubenswrapper[4926]: I1125 19:49:59.543963 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pvg8j" event={"ID":"52f3cd70-cda4-433a-b4ad-b78d9f699462","Type":"ContainerDied","Data":"06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574"} Nov 25 19:50:00 crc kubenswrapper[4926]: I1125 19:50:00.559030 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pvg8j" 
event={"ID":"52f3cd70-cda4-433a-b4ad-b78d9f699462","Type":"ContainerStarted","Data":"cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875"} Nov 25 19:50:00 crc kubenswrapper[4926]: I1125 19:50:00.580468 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pvg8j" podStartSLOduration=3.050107458 podStartE2EDuration="5.580446206s" podCreationTimestamp="2025-11-25 19:49:55 +0000 UTC" firstStartedPulling="2025-11-25 19:49:57.513806224 +0000 UTC m=+5827.899319869" lastFinishedPulling="2025-11-25 19:50:00.044144982 +0000 UTC m=+5830.429658617" observedRunningTime="2025-11-25 19:50:00.574003702 +0000 UTC m=+5830.959517327" watchObservedRunningTime="2025-11-25 19:50:00.580446206 +0000 UTC m=+5830.965959811" Nov 25 19:50:05 crc kubenswrapper[4926]: I1125 19:50:05.748952 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:50:05 crc kubenswrapper[4926]: I1125 19:50:05.749580 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:50:05 crc kubenswrapper[4926]: I1125 19:50:05.841864 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:50:06 crc kubenswrapper[4926]: I1125 19:50:06.723230 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:50:06 crc kubenswrapper[4926]: I1125 19:50:06.794751 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pvg8j"] Nov 25 19:50:08 crc kubenswrapper[4926]: I1125 19:50:08.672548 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pvg8j" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="registry-server" containerID="cri-o://cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875" gracePeriod=2 Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.213831 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.311076 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-catalog-content\") pod \"52f3cd70-cda4-433a-b4ad-b78d9f699462\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.311443 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-utilities\") pod \"52f3cd70-cda4-433a-b4ad-b78d9f699462\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.311590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cdfn\" (UniqueName: \"kubernetes.io/projected/52f3cd70-cda4-433a-b4ad-b78d9f699462-kube-api-access-7cdfn\") pod \"52f3cd70-cda4-433a-b4ad-b78d9f699462\" (UID: \"52f3cd70-cda4-433a-b4ad-b78d9f699462\") " Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.312003 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-utilities" (OuterVolumeSpecName: "utilities") pod "52f3cd70-cda4-433a-b4ad-b78d9f699462" (UID: "52f3cd70-cda4-433a-b4ad-b78d9f699462"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.312444 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.319616 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52f3cd70-cda4-433a-b4ad-b78d9f699462-kube-api-access-7cdfn" (OuterVolumeSpecName: "kube-api-access-7cdfn") pod "52f3cd70-cda4-433a-b4ad-b78d9f699462" (UID: "52f3cd70-cda4-433a-b4ad-b78d9f699462"). InnerVolumeSpecName "kube-api-access-7cdfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.328087 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52f3cd70-cda4-433a-b4ad-b78d9f699462" (UID: "52f3cd70-cda4-433a-b4ad-b78d9f699462"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.414584 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cdfn\" (UniqueName: \"kubernetes.io/projected/52f3cd70-cda4-433a-b4ad-b78d9f699462-kube-api-access-7cdfn\") on node \"crc\" DevicePath \"\"" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.414636 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f3cd70-cda4-433a-b4ad-b78d9f699462-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.692660 4926 generic.go:334] "Generic (PLEG): container finished" podID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerID="cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875" exitCode=0 Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.692730 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pvg8j" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.692765 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pvg8j" event={"ID":"52f3cd70-cda4-433a-b4ad-b78d9f699462","Type":"ContainerDied","Data":"cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875"} Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.692819 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pvg8j" event={"ID":"52f3cd70-cda4-433a-b4ad-b78d9f699462","Type":"ContainerDied","Data":"6c96e9835836165cdd650b2bba145a7d117638f3fd8701ba0a4d91a08b14cebe"} Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.692862 4926 scope.go:117] "RemoveContainer" containerID="cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.753928 4926 scope.go:117] "RemoveContainer" containerID="06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574" Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.759149 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pvg8j"] Nov 25 19:50:09 crc kubenswrapper[4926]: I1125 19:50:09.775685 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pvg8j"] Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.307770 4926 scope.go:117] "RemoveContainer" containerID="63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.348285 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:50:10 crc kubenswrapper[4926]: E1125 19:50:10.348699 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.363352 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" path="/var/lib/kubelet/pods/52f3cd70-cda4-433a-b4ad-b78d9f699462/volumes" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 
19:50:10.579672 4926 scope.go:117] "RemoveContainer" containerID="cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875" Nov 25 19:50:10 crc kubenswrapper[4926]: E1125 19:50:10.580210 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875\": container with ID starting with cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875 not found: ID does not exist" containerID="cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.580259 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875"} err="failed to get container status \"cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875\": rpc error: code = NotFound desc = could not find container \"cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875\": container with ID starting with cb0f2804fe4f496927713df8a9c04cf6b9d257fba54b8cdda1bd1e4bdcc05875 not found: ID does not exist" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.580290 4926 scope.go:117] "RemoveContainer" containerID="06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574" Nov 25 19:50:10 crc kubenswrapper[4926]: E1125 19:50:10.580869 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574\": container with ID starting with 06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574 not found: ID does not exist" containerID="06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.580908 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574"} err="failed to get container status \"06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574\": rpc error: code = NotFound desc = could not find container \"06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574\": container with ID starting with 06b8588efc5883b951c3599134a3dc65df0d847c1811914e3903219bc19c0574 not found: ID does not exist" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.580938 4926 scope.go:117] "RemoveContainer" containerID="63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1" Nov 25 19:50:10 crc kubenswrapper[4926]: E1125 19:50:10.581300 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1\": container with ID starting with 63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1 not found: ID does not exist" containerID="63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1" Nov 25 19:50:10 crc kubenswrapper[4926]: I1125 19:50:10.581327 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1"} err="failed to get container status \"63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1\": rpc error: code = NotFound desc = could not find container \"63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1\": container with ID 
starting with 63d843cec8040a14fb5d1f94db0fd9c7b2312b8a5c4188ca63318d8aff640fd1 not found: ID does not exist" Nov 25 19:50:21 crc kubenswrapper[4926]: I1125 19:50:21.329539 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:50:21 crc kubenswrapper[4926]: E1125 19:50:21.330730 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:50:36 crc kubenswrapper[4926]: I1125 19:50:36.330096 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:50:36 crc kubenswrapper[4926]: E1125 19:50:36.331024 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:50:51 crc kubenswrapper[4926]: I1125 19:50:51.329712 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:50:51 crc kubenswrapper[4926]: E1125 19:50:51.330528 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:51:04 crc kubenswrapper[4926]: I1125 19:51:04.329932 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:51:04 crc kubenswrapper[4926]: E1125 19:51:04.331192 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:51:15 crc kubenswrapper[4926]: I1125 19:51:15.330662 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:51:15 crc kubenswrapper[4926]: E1125 19:51:15.331740 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:51:29 crc kubenswrapper[4926]: I1125 19:51:29.329314 4926 scope.go:117] 
"RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:51:29 crc kubenswrapper[4926]: E1125 19:51:29.330275 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:51:40 crc kubenswrapper[4926]: I1125 19:51:40.340091 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:51:40 crc kubenswrapper[4926]: E1125 19:51:40.341202 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:51:51 crc kubenswrapper[4926]: I1125 19:51:51.329983 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:51:51 crc kubenswrapper[4926]: E1125 19:51:51.331201 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:52:06 crc kubenswrapper[4926]: I1125 19:52:06.329598 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:52:06 crc kubenswrapper[4926]: E1125 19:52:06.330231 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:52:18 crc kubenswrapper[4926]: I1125 19:52:18.329502 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:52:18 crc kubenswrapper[4926]: E1125 19:52:18.330209 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:52:29 crc kubenswrapper[4926]: I1125 19:52:29.329253 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:52:29 crc kubenswrapper[4926]: E1125 19:52:29.329887 4926 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:52:44 crc kubenswrapper[4926]: I1125 19:52:44.330711 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:52:44 crc kubenswrapper[4926]: E1125 19:52:44.332370 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:52:55 crc kubenswrapper[4926]: I1125 19:52:55.329896 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:52:55 crc kubenswrapper[4926]: E1125 19:52:55.330699 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:53:10 crc kubenswrapper[4926]: I1125 19:53:10.329296 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:53:11 crc kubenswrapper[4926]: I1125 19:53:11.065252 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"880c762ef6c0ea78a0d4bb461fd3efb10446558d6a3322b86081de4a0be21a90"} Nov 25 19:55:33 crc kubenswrapper[4926]: I1125 19:55:33.541939 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:55:33 crc kubenswrapper[4926]: I1125 19:55:33.542644 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:56:03 crc kubenswrapper[4926]: I1125 19:56:03.542054 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:56:03 crc kubenswrapper[4926]: I1125 19:56:03.542911 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" 
podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:56:33 crc kubenswrapper[4926]: I1125 19:56:33.541988 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:56:33 crc kubenswrapper[4926]: I1125 19:56:33.543544 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:56:33 crc kubenswrapper[4926]: I1125 19:56:33.543635 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:56:33 crc kubenswrapper[4926]: I1125 19:56:33.544507 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"880c762ef6c0ea78a0d4bb461fd3efb10446558d6a3322b86081de4a0be21a90"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:56:33 crc kubenswrapper[4926]: I1125 19:56:33.544568 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://880c762ef6c0ea78a0d4bb461fd3efb10446558d6a3322b86081de4a0be21a90" gracePeriod=600 Nov 25 19:56:34 crc kubenswrapper[4926]: I1125 19:56:34.596123 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="880c762ef6c0ea78a0d4bb461fd3efb10446558d6a3322b86081de4a0be21a90" exitCode=0 Nov 25 19:56:34 crc kubenswrapper[4926]: I1125 19:56:34.596217 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"880c762ef6c0ea78a0d4bb461fd3efb10446558d6a3322b86081de4a0be21a90"} Nov 25 19:56:34 crc kubenswrapper[4926]: I1125 19:56:34.596864 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf"} Nov 25 19:56:34 crc kubenswrapper[4926]: I1125 19:56:34.596902 4926 scope.go:117] "RemoveContainer" containerID="925362c0f8fddb3fc6efb9610cc2ca2345be16865d4ee5ef123468d7688dc934" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.259733 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rcvln"] Nov 25 19:56:48 crc kubenswrapper[4926]: E1125 19:56:48.260829 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="registry-server" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.260848 4926 
state_mem.go:107] "Deleted CPUSet assignment" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="registry-server" Nov 25 19:56:48 crc kubenswrapper[4926]: E1125 19:56:48.260875 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="extract-utilities" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.260884 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="extract-utilities" Nov 25 19:56:48 crc kubenswrapper[4926]: E1125 19:56:48.260914 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="extract-content" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.260924 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="extract-content" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.261187 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="52f3cd70-cda4-433a-b4ad-b78d9f699462" containerName="registry-server" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.263754 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.272829 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rcvln"] Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.427824 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-utilities\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.427885 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-catalog-content\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.427926 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzm72\" (UniqueName: \"kubernetes.io/projected/6bdb3fcc-8eff-47e6-b839-e042e20a9158-kube-api-access-jzm72\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.529476 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-utilities\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.529526 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-catalog-content\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc 
kubenswrapper[4926]: I1125 19:56:48.529551 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzm72\" (UniqueName: \"kubernetes.io/projected/6bdb3fcc-8eff-47e6-b839-e042e20a9158-kube-api-access-jzm72\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.530007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-catalog-content\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.530086 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-utilities\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.550880 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzm72\" (UniqueName: \"kubernetes.io/projected/6bdb3fcc-8eff-47e6-b839-e042e20a9158-kube-api-access-jzm72\") pod \"certified-operators-rcvln\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:48 crc kubenswrapper[4926]: I1125 19:56:48.594113 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:49 crc kubenswrapper[4926]: I1125 19:56:49.140257 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rcvln"] Nov 25 19:56:49 crc kubenswrapper[4926]: I1125 19:56:49.828338 4926 generic.go:334] "Generic (PLEG): container finished" podID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerID="083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d" exitCode=0 Nov 25 19:56:49 crc kubenswrapper[4926]: I1125 19:56:49.828430 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerDied","Data":"083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d"} Nov 25 19:56:49 crc kubenswrapper[4926]: I1125 19:56:49.828746 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerStarted","Data":"6ec08436b44b6ff758a4ff2f7520ac0ff5a715ccca686837f21a39ac3a1d918f"} Nov 25 19:56:49 crc kubenswrapper[4926]: I1125 19:56:49.832098 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 19:56:50 crc kubenswrapper[4926]: I1125 19:56:50.840849 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerStarted","Data":"74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032"} Nov 25 19:56:51 crc kubenswrapper[4926]: I1125 19:56:51.858936 4926 generic.go:334] "Generic (PLEG): container finished" podID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" 
containerID="74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032" exitCode=0 Nov 25 19:56:51 crc kubenswrapper[4926]: I1125 19:56:51.859296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerDied","Data":"74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032"} Nov 25 19:56:52 crc kubenswrapper[4926]: I1125 19:56:52.872505 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerStarted","Data":"3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c"} Nov 25 19:56:52 crc kubenswrapper[4926]: I1125 19:56:52.905996 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rcvln" podStartSLOduration=2.447753139 podStartE2EDuration="4.905961803s" podCreationTimestamp="2025-11-25 19:56:48 +0000 UTC" firstStartedPulling="2025-11-25 19:56:49.831678165 +0000 UTC m=+6240.217191800" lastFinishedPulling="2025-11-25 19:56:52.289886849 +0000 UTC m=+6242.675400464" observedRunningTime="2025-11-25 19:56:52.896494345 +0000 UTC m=+6243.282007970" watchObservedRunningTime="2025-11-25 19:56:52.905961803 +0000 UTC m=+6243.291475408" Nov 25 19:56:58 crc kubenswrapper[4926]: I1125 19:56:58.594855 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:58 crc kubenswrapper[4926]: I1125 19:56:58.595558 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:58 crc kubenswrapper[4926]: I1125 19:56:58.666432 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:59 crc kubenswrapper[4926]: I1125 19:56:59.042786 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:56:59 crc kubenswrapper[4926]: I1125 19:56:59.109514 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rcvln"] Nov 25 19:57:00 crc kubenswrapper[4926]: I1125 19:57:00.976419 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rcvln" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="registry-server" containerID="cri-o://3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c" gracePeriod=2 Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.529652 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.622690 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzm72\" (UniqueName: \"kubernetes.io/projected/6bdb3fcc-8eff-47e6-b839-e042e20a9158-kube-api-access-jzm72\") pod \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.622745 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-utilities\") pod \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.622929 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-catalog-content\") pod \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\" (UID: \"6bdb3fcc-8eff-47e6-b839-e042e20a9158\") " Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.626660 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-utilities" (OuterVolumeSpecName: "utilities") pod "6bdb3fcc-8eff-47e6-b839-e042e20a9158" (UID: "6bdb3fcc-8eff-47e6-b839-e042e20a9158"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.631579 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bdb3fcc-8eff-47e6-b839-e042e20a9158-kube-api-access-jzm72" (OuterVolumeSpecName: "kube-api-access-jzm72") pod "6bdb3fcc-8eff-47e6-b839-e042e20a9158" (UID: "6bdb3fcc-8eff-47e6-b839-e042e20a9158"). InnerVolumeSpecName "kube-api-access-jzm72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.691175 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bdb3fcc-8eff-47e6-b839-e042e20a9158" (UID: "6bdb3fcc-8eff-47e6-b839-e042e20a9158"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.726782 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.726813 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzm72\" (UniqueName: \"kubernetes.io/projected/6bdb3fcc-8eff-47e6-b839-e042e20a9158-kube-api-access-jzm72\") on node \"crc\" DevicePath \"\"" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.726823 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bdb3fcc-8eff-47e6-b839-e042e20a9158-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.990351 4926 generic.go:334] "Generic (PLEG): container finished" podID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerID="3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c" exitCode=0 Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.990443 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerDied","Data":"3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c"} Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.990727 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rcvln" event={"ID":"6bdb3fcc-8eff-47e6-b839-e042e20a9158","Type":"ContainerDied","Data":"6ec08436b44b6ff758a4ff2f7520ac0ff5a715ccca686837f21a39ac3a1d918f"} Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.990751 4926 scope.go:117] "RemoveContainer" containerID="3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c" Nov 25 19:57:01 crc kubenswrapper[4926]: I1125 19:57:01.990475 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rcvln" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.046418 4926 scope.go:117] "RemoveContainer" containerID="74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.055973 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rcvln"] Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.069989 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rcvln"] Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.073857 4926 scope.go:117] "RemoveContainer" containerID="083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.133119 4926 scope.go:117] "RemoveContainer" containerID="3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c" Nov 25 19:57:02 crc kubenswrapper[4926]: E1125 19:57:02.133634 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c\": container with ID starting with 3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c not found: ID does not exist" containerID="3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.133691 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c"} err="failed to get container status \"3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c\": rpc error: code = NotFound desc = could not find container \"3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c\": container with ID starting with 3ef4c7a3af462cee6e04bf55ed27f313c1753abbb7cbb5da7ff69bb8cb68ff8c not found: ID does not exist" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.133727 4926 scope.go:117] "RemoveContainer" containerID="74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032" Nov 25 19:57:02 crc kubenswrapper[4926]: E1125 19:57:02.134067 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032\": container with ID starting with 74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032 not found: ID does not exist" containerID="74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.134096 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032"} err="failed to get container status \"74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032\": rpc error: code = NotFound desc = could not find container \"74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032\": container with ID starting with 74bf1d2b9ab5298fdb5b3f532c15d0a16c9966b94ad0df97695b1e15bfd46032 not found: ID does not exist" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.134118 4926 scope.go:117] "RemoveContainer" containerID="083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d" Nov 25 19:57:02 crc kubenswrapper[4926]: E1125 19:57:02.134452 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d\": container with ID starting with 083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d not found: ID does not exist" containerID="083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.134487 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d"} err="failed to get container status \"083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d\": rpc error: code = NotFound desc = could not find container \"083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d\": container with ID starting with 083bc6ca181749ef96fd1410fef08161122c9ee47d86558a1420809cc392151d not found: ID does not exist" Nov 25 19:57:02 crc kubenswrapper[4926]: I1125 19:57:02.351511 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" path="/var/lib/kubelet/pods/6bdb3fcc-8eff-47e6-b839-e042e20a9158/volumes" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.310911 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f4kmh"] Nov 25 19:57:53 crc kubenswrapper[4926]: E1125 19:57:53.314248 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="registry-server" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.314579 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="registry-server" Nov 25 19:57:53 crc kubenswrapper[4926]: E1125 19:57:53.314913 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="extract-utilities" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.315148 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="extract-utilities" Nov 25 19:57:53 crc kubenswrapper[4926]: E1125 19:57:53.315453 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="extract-content" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.315720 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="extract-content" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.317521 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bdb3fcc-8eff-47e6-b839-e042e20a9158" containerName="registry-server" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.326572 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.383223 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f4kmh"] Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.450991 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzxxw\" (UniqueName: \"kubernetes.io/projected/b15f466e-8dea-4aa6-8575-08c8a02d97ff-kube-api-access-zzxxw\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.451504 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-utilities\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.451636 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-catalog-content\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.500494 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9cpb6"] Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.502491 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.536610 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9cpb6"] Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.553493 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-utilities\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.553804 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-catalog-content\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.553874 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzxxw\" (UniqueName: \"kubernetes.io/projected/b15f466e-8dea-4aa6-8575-08c8a02d97ff-kube-api-access-zzxxw\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.554818 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-utilities\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.554935 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-catalog-content\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.597718 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzxxw\" (UniqueName: \"kubernetes.io/projected/b15f466e-8dea-4aa6-8575-08c8a02d97ff-kube-api-access-zzxxw\") pod \"community-operators-f4kmh\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.655789 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-utilities\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.655880 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-catalog-content\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.655969 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzstl\" (UniqueName: \"kubernetes.io/projected/178464ee-9b30-40cc-b131-aa2ddd1810a3-kube-api-access-fzstl\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.657852 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.757276 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-utilities\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.757559 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-catalog-content\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.757659 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-utilities\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.757756 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzstl\" (UniqueName: \"kubernetes.io/projected/178464ee-9b30-40cc-b131-aa2ddd1810a3-kube-api-access-fzstl\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.758607 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-catalog-content\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.801318 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzstl\" (UniqueName: \"kubernetes.io/projected/178464ee-9b30-40cc-b131-aa2ddd1810a3-kube-api-access-fzstl\") pod \"redhat-operators-9cpb6\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:53 crc kubenswrapper[4926]: I1125 19:57:53.828180 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.271241 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f4kmh"] Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.402146 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9cpb6"] Nov 25 19:57:54 crc kubenswrapper[4926]: W1125 19:57:54.402251 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod178464ee_9b30_40cc_b131_aa2ddd1810a3.slice/crio-25e7fc326ba40b551ba45f36d80d21fc1cb8f234fbbc6f748056ee58dd1e5fe9 WatchSource:0}: Error finding container 25e7fc326ba40b551ba45f36d80d21fc1cb8f234fbbc6f748056ee58dd1e5fe9: Status 404 returned error can't find the container with id 25e7fc326ba40b551ba45f36d80d21fc1cb8f234fbbc6f748056ee58dd1e5fe9 Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.661917 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerStarted","Data":"99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e"} Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.661974 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerStarted","Data":"25e7fc326ba40b551ba45f36d80d21fc1cb8f234fbbc6f748056ee58dd1e5fe9"} Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.666256 4926 generic.go:334] "Generic (PLEG): container finished" podID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerID="0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec" exitCode=0 Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.666302 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerDied","Data":"0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec"} Nov 25 19:57:54 crc kubenswrapper[4926]: I1125 19:57:54.666326 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerStarted","Data":"c4e4b7c0031021e83a98e9395140504af507366ea92ac125735cfb6c5bed3a08"} Nov 25 19:57:55 crc kubenswrapper[4926]: I1125 19:57:55.677438 4926 generic.go:334] "Generic (PLEG): container finished" podID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerID="99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e" exitCode=0 Nov 25 19:57:55 crc kubenswrapper[4926]: I1125 19:57:55.677627 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerDied","Data":"99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e"} Nov 25 19:57:55 crc kubenswrapper[4926]: I1125 19:57:55.682357 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerStarted","Data":"fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102"} Nov 25 19:57:57 crc kubenswrapper[4926]: I1125 19:57:57.703185 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerStarted","Data":"41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7"} Nov 25 19:57:57 crc kubenswrapper[4926]: I1125 19:57:57.705502 4926 generic.go:334] "Generic (PLEG): container finished" podID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerID="fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102" exitCode=0 Nov 25 19:57:57 crc kubenswrapper[4926]: I1125 19:57:57.705532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerDied","Data":"fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102"} Nov 25 19:57:58 crc kubenswrapper[4926]: I1125 19:57:58.717159 4926 generic.go:334] "Generic (PLEG): container finished" podID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerID="41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7" exitCode=0 Nov 25 19:57:58 crc kubenswrapper[4926]: I1125 19:57:58.717250 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerDied","Data":"41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7"} Nov 25 19:57:58 crc kubenswrapper[4926]: I1125 19:57:58.722113 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerStarted","Data":"c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35"} Nov 25 19:57:58 crc kubenswrapper[4926]: I1125 19:57:58.758659 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f4kmh" podStartSLOduration=2.288245869 podStartE2EDuration="5.758642922s" podCreationTimestamp="2025-11-25 19:57:53 +0000 UTC" firstStartedPulling="2025-11-25 19:57:54.668505153 +0000 UTC m=+6305.054018748" lastFinishedPulling="2025-11-25 19:57:58.138902196 +0000 UTC m=+6308.524415801" observedRunningTime="2025-11-25 19:57:58.755884057 +0000 UTC m=+6309.141397662" watchObservedRunningTime="2025-11-25 19:57:58.758642922 +0000 UTC m=+6309.144156527" Nov 25 19:57:59 crc kubenswrapper[4926]: I1125 19:57:59.744537 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerStarted","Data":"308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d"} Nov 25 19:57:59 crc kubenswrapper[4926]: I1125 19:57:59.774940 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9cpb6" podStartSLOduration=3.343671255 podStartE2EDuration="6.774917413s" podCreationTimestamp="2025-11-25 19:57:53 +0000 UTC" firstStartedPulling="2025-11-25 19:57:55.680903639 +0000 UTC m=+6306.066417234" lastFinishedPulling="2025-11-25 19:57:59.112149787 +0000 UTC m=+6309.497663392" observedRunningTime="2025-11-25 19:57:59.764445208 +0000 UTC m=+6310.149958833" watchObservedRunningTime="2025-11-25 19:57:59.774917413 +0000 UTC m=+6310.160431028" Nov 25 19:58:03 crc kubenswrapper[4926]: I1125 19:58:03.658893 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:58:03 crc kubenswrapper[4926]: I1125 19:58:03.659584 4926 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:58:03 crc kubenswrapper[4926]: I1125 19:58:03.710512 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:58:03 crc kubenswrapper[4926]: I1125 19:58:03.828978 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:58:03 crc kubenswrapper[4926]: I1125 19:58:03.829287 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:58:03 crc kubenswrapper[4926]: I1125 19:58:03.846068 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:58:04 crc kubenswrapper[4926]: I1125 19:58:04.490631 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f4kmh"] Nov 25 19:58:04 crc kubenswrapper[4926]: I1125 19:58:04.880360 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9cpb6" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="registry-server" probeResult="failure" output=< Nov 25 19:58:04 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 19:58:04 crc kubenswrapper[4926]: > Nov 25 19:58:05 crc kubenswrapper[4926]: I1125 19:58:05.825657 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f4kmh" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="registry-server" containerID="cri-o://c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35" gracePeriod=2 Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.367326 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.445452 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-catalog-content\") pod \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.445742 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-utilities\") pod \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.446049 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzxxw\" (UniqueName: \"kubernetes.io/projected/b15f466e-8dea-4aa6-8575-08c8a02d97ff-kube-api-access-zzxxw\") pod \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\" (UID: \"b15f466e-8dea-4aa6-8575-08c8a02d97ff\") " Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.455555 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-utilities" (OuterVolumeSpecName: "utilities") pod "b15f466e-8dea-4aa6-8575-08c8a02d97ff" (UID: "b15f466e-8dea-4aa6-8575-08c8a02d97ff"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.466745 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b15f466e-8dea-4aa6-8575-08c8a02d97ff-kube-api-access-zzxxw" (OuterVolumeSpecName: "kube-api-access-zzxxw") pod "b15f466e-8dea-4aa6-8575-08c8a02d97ff" (UID: "b15f466e-8dea-4aa6-8575-08c8a02d97ff"). InnerVolumeSpecName "kube-api-access-zzxxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.523281 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b15f466e-8dea-4aa6-8575-08c8a02d97ff" (UID: "b15f466e-8dea-4aa6-8575-08c8a02d97ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.548701 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzxxw\" (UniqueName: \"kubernetes.io/projected/b15f466e-8dea-4aa6-8575-08c8a02d97ff-kube-api-access-zzxxw\") on node \"crc\" DevicePath \"\"" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.548737 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.548770 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15f466e-8dea-4aa6-8575-08c8a02d97ff-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.840055 4926 generic.go:334] "Generic (PLEG): container finished" podID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerID="c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35" exitCode=0 Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.840113 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerDied","Data":"c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35"} Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.840145 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f4kmh" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.840176 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f4kmh" event={"ID":"b15f466e-8dea-4aa6-8575-08c8a02d97ff","Type":"ContainerDied","Data":"c4e4b7c0031021e83a98e9395140504af507366ea92ac125735cfb6c5bed3a08"} Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.840199 4926 scope.go:117] "RemoveContainer" containerID="c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.890472 4926 scope.go:117] "RemoveContainer" containerID="fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.896957 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f4kmh"] Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.908677 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f4kmh"] Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.930362 4926 scope.go:117] "RemoveContainer" containerID="0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.979194 4926 scope.go:117] "RemoveContainer" containerID="c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35" Nov 25 19:58:06 crc kubenswrapper[4926]: E1125 19:58:06.988958 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35\": container with ID starting with c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35 not found: ID does not exist" containerID="c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.988993 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35"} err="failed to get container status \"c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35\": rpc error: code = NotFound desc = could not find container \"c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35\": container with ID starting with c4023883d586556e81fd854475238edffe8bc7de48bbb03b9b8229247117bb35 not found: ID does not exist" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.989019 4926 scope.go:117] "RemoveContainer" containerID="fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102" Nov 25 19:58:06 crc kubenswrapper[4926]: E1125 19:58:06.989322 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102\": container with ID starting with fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102 not found: ID does not exist" containerID="fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.989345 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102"} err="failed to get container status \"fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102\": rpc error: code = NotFound desc = could not find 
container \"fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102\": container with ID starting with fcffb64f011e081e756d45e598f1d60e610a5132cb5656baec5b1f9e5a08b102 not found: ID does not exist" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.989357 4926 scope.go:117] "RemoveContainer" containerID="0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec" Nov 25 19:58:06 crc kubenswrapper[4926]: E1125 19:58:06.989695 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec\": container with ID starting with 0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec not found: ID does not exist" containerID="0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec" Nov 25 19:58:06 crc kubenswrapper[4926]: I1125 19:58:06.989742 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec"} err="failed to get container status \"0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec\": rpc error: code = NotFound desc = could not find container \"0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec\": container with ID starting with 0a8e7696eea632827cbd8343e96190465caf0a44c86ba9c3aa08896e687523ec not found: ID does not exist" Nov 25 19:58:08 crc kubenswrapper[4926]: I1125 19:58:08.351091 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" path="/var/lib/kubelet/pods/b15f466e-8dea-4aa6-8575-08c8a02d97ff/volumes" Nov 25 19:58:13 crc kubenswrapper[4926]: I1125 19:58:13.893027 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:58:13 crc kubenswrapper[4926]: I1125 19:58:13.972481 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:58:14 crc kubenswrapper[4926]: I1125 19:58:14.139557 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9cpb6"] Nov 25 19:58:14 crc kubenswrapper[4926]: I1125 19:58:14.933989 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9cpb6" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="registry-server" containerID="cri-o://308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d" gracePeriod=2 Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.476214 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.550205 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-catalog-content\") pod \"178464ee-9b30-40cc-b131-aa2ddd1810a3\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.550674 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-utilities\") pod \"178464ee-9b30-40cc-b131-aa2ddd1810a3\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.550969 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzstl\" (UniqueName: \"kubernetes.io/projected/178464ee-9b30-40cc-b131-aa2ddd1810a3-kube-api-access-fzstl\") pod \"178464ee-9b30-40cc-b131-aa2ddd1810a3\" (UID: \"178464ee-9b30-40cc-b131-aa2ddd1810a3\") " Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.552480 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-utilities" (OuterVolumeSpecName: "utilities") pod "178464ee-9b30-40cc-b131-aa2ddd1810a3" (UID: "178464ee-9b30-40cc-b131-aa2ddd1810a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.563701 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/178464ee-9b30-40cc-b131-aa2ddd1810a3-kube-api-access-fzstl" (OuterVolumeSpecName: "kube-api-access-fzstl") pod "178464ee-9b30-40cc-b131-aa2ddd1810a3" (UID: "178464ee-9b30-40cc-b131-aa2ddd1810a3"). InnerVolumeSpecName "kube-api-access-fzstl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.647103 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "178464ee-9b30-40cc-b131-aa2ddd1810a3" (UID: "178464ee-9b30-40cc-b131-aa2ddd1810a3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.653638 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzstl\" (UniqueName: \"kubernetes.io/projected/178464ee-9b30-40cc-b131-aa2ddd1810a3-kube-api-access-fzstl\") on node \"crc\" DevicePath \"\"" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.653673 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.653693 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/178464ee-9b30-40cc-b131-aa2ddd1810a3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.951864 4926 generic.go:334] "Generic (PLEG): container finished" podID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerID="308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d" exitCode=0 Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.951907 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerDied","Data":"308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d"} Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.951936 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9cpb6" event={"ID":"178464ee-9b30-40cc-b131-aa2ddd1810a3","Type":"ContainerDied","Data":"25e7fc326ba40b551ba45f36d80d21fc1cb8f234fbbc6f748056ee58dd1e5fe9"} Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.951955 4926 scope.go:117] "RemoveContainer" containerID="308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.951999 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9cpb6" Nov 25 19:58:15 crc kubenswrapper[4926]: I1125 19:58:15.991050 4926 scope.go:117] "RemoveContainer" containerID="41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.011160 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9cpb6"] Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.025094 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9cpb6"] Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.030776 4926 scope.go:117] "RemoveContainer" containerID="99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.086785 4926 scope.go:117] "RemoveContainer" containerID="308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d" Nov 25 19:58:16 crc kubenswrapper[4926]: E1125 19:58:16.090756 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d\": container with ID starting with 308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d not found: ID does not exist" containerID="308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.090792 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d"} err="failed to get container status \"308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d\": rpc error: code = NotFound desc = could not find container \"308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d\": container with ID starting with 308e4f0292e370100ff50ff865a0d2786ecefc4c0ffebbc568bad0040baa858d not found: ID does not exist" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.090813 4926 scope.go:117] "RemoveContainer" containerID="41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7" Nov 25 19:58:16 crc kubenswrapper[4926]: E1125 19:58:16.091721 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7\": container with ID starting with 41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7 not found: ID does not exist" containerID="41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.091742 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7"} err="failed to get container status \"41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7\": rpc error: code = NotFound desc = could not find container \"41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7\": container with ID starting with 41dfdb3a364f996c01e6299ae3cba208e326f6575260ae5f07198a17a9f7e7c7 not found: ID does not exist" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.091757 4926 scope.go:117] "RemoveContainer" containerID="99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e" Nov 25 19:58:16 crc kubenswrapper[4926]: E1125 19:58:16.091985 4926 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e\": container with ID starting with 99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e not found: ID does not exist" containerID="99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.092000 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e"} err="failed to get container status \"99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e\": rpc error: code = NotFound desc = could not find container \"99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e\": container with ID starting with 99198be7ed959e2822dfe463f6d445bc3ce0b1bc04994f5c2f73d40d18afe64e not found: ID does not exist" Nov 25 19:58:16 crc kubenswrapper[4926]: I1125 19:58:16.351756 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" path="/var/lib/kubelet/pods/178464ee-9b30-40cc-b131-aa2ddd1810a3/volumes" Nov 25 19:58:33 crc kubenswrapper[4926]: I1125 19:58:33.540866 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:58:33 crc kubenswrapper[4926]: I1125 19:58:33.541454 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:59:03 crc kubenswrapper[4926]: I1125 19:59:03.541813 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:59:03 crc kubenswrapper[4926]: I1125 19:59:03.544275 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.541454 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.541946 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.541998 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.542671 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.542731 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" gracePeriod=600 Nov 25 19:59:33 crc kubenswrapper[4926]: E1125 19:59:33.666285 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.955702 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" exitCode=0 Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.955764 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf"} Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.955808 4926 scope.go:117] "RemoveContainer" containerID="880c762ef6c0ea78a0d4bb461fd3efb10446558d6a3322b86081de4a0be21a90" Nov 25 19:59:33 crc kubenswrapper[4926]: I1125 19:59:33.956756 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 19:59:33 crc kubenswrapper[4926]: E1125 19:59:33.957425 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:59:45 crc kubenswrapper[4926]: I1125 19:59:45.330020 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 19:59:45 crc kubenswrapper[4926]: E1125 19:59:45.330931 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 19:59:57 crc 
kubenswrapper[4926]: I1125 19:59:57.328956 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 19:59:57 crc kubenswrapper[4926]: E1125 19:59:57.329840 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.179583 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s"] Nov 25 20:00:00 crc kubenswrapper[4926]: E1125 20:00:00.180657 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="extract-utilities" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.180678 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="extract-utilities" Nov 25 20:00:00 crc kubenswrapper[4926]: E1125 20:00:00.180693 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="extract-content" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.180699 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="extract-content" Nov 25 20:00:00 crc kubenswrapper[4926]: E1125 20:00:00.180716 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="extract-content" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.180722 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="extract-content" Nov 25 20:00:00 crc kubenswrapper[4926]: E1125 20:00:00.180735 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="registry-server" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.180740 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="registry-server" Nov 25 20:00:00 crc kubenswrapper[4926]: E1125 20:00:00.180758 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="registry-server" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.180765 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="registry-server" Nov 25 20:00:00 crc kubenswrapper[4926]: E1125 20:00:00.180783 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="extract-utilities" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.180791 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="extract-utilities" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.181054 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b15f466e-8dea-4aa6-8575-08c8a02d97ff" containerName="registry-server" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.181096 4926 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="178464ee-9b30-40cc-b131-aa2ddd1810a3" containerName="registry-server" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.182048 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.184500 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.185395 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.192545 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s"] Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.274007 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-config-volume\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.274082 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-secret-volume\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.274169 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtrrz\" (UniqueName: \"kubernetes.io/projected/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-kube-api-access-wtrrz\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.377014 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-config-volume\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.377108 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-secret-volume\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.377202 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtrrz\" (UniqueName: \"kubernetes.io/projected/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-kube-api-access-wtrrz\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 
20:00:00.378129 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-config-volume\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.386483 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-secret-volume\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.394176 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtrrz\" (UniqueName: \"kubernetes.io/projected/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-kube-api-access-wtrrz\") pod \"collect-profiles-29401680-r4n7s\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:00 crc kubenswrapper[4926]: I1125 20:00:00.518197 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:01 crc kubenswrapper[4926]: I1125 20:00:01.038842 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s"] Nov 25 20:00:01 crc kubenswrapper[4926]: I1125 20:00:01.290161 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" event={"ID":"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b","Type":"ContainerStarted","Data":"8d575baf8705483cf802e12f6d0e144f69befc298524702ff1ad8922ef7a73f3"} Nov 25 20:00:01 crc kubenswrapper[4926]: I1125 20:00:01.290617 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" event={"ID":"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b","Type":"ContainerStarted","Data":"c0e64ec632e915a11ef7c7063dd20a855978bdc9a118eef1f47c3042281b5288"} Nov 25 20:00:01 crc kubenswrapper[4926]: I1125 20:00:01.312080 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" podStartSLOduration=1.312057599 podStartE2EDuration="1.312057599s" podCreationTimestamp="2025-11-25 20:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 20:00:01.308314317 +0000 UTC m=+6431.693827932" watchObservedRunningTime="2025-11-25 20:00:01.312057599 +0000 UTC m=+6431.697571194" Nov 25 20:00:02 crc kubenswrapper[4926]: I1125 20:00:02.303721 4926 generic.go:334] "Generic (PLEG): container finished" podID="1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" containerID="8d575baf8705483cf802e12f6d0e144f69befc298524702ff1ad8922ef7a73f3" exitCode=0 Nov 25 20:00:02 crc kubenswrapper[4926]: I1125 20:00:02.304058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" event={"ID":"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b","Type":"ContainerDied","Data":"8d575baf8705483cf802e12f6d0e144f69befc298524702ff1ad8922ef7a73f3"} Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 
20:00:03.757172 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.848751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtrrz\" (UniqueName: \"kubernetes.io/projected/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-kube-api-access-wtrrz\") pod \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.848894 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-secret-volume\") pod \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.849190 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-config-volume\") pod \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\" (UID: \"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b\") " Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.849803 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-config-volume" (OuterVolumeSpecName: "config-volume") pod "1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" (UID: "1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.850835 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.854991 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" (UID: "1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.855657 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-kube-api-access-wtrrz" (OuterVolumeSpecName: "kube-api-access-wtrrz") pod "1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" (UID: "1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b"). InnerVolumeSpecName "kube-api-access-wtrrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.954268 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtrrz\" (UniqueName: \"kubernetes.io/projected/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-kube-api-access-wtrrz\") on node \"crc\" DevicePath \"\"" Nov 25 20:00:03 crc kubenswrapper[4926]: I1125 20:00:03.954349 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 20:00:04 crc kubenswrapper[4926]: I1125 20:00:04.330543 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" Nov 25 20:00:04 crc kubenswrapper[4926]: I1125 20:00:04.348349 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401680-r4n7s" event={"ID":"1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b","Type":"ContainerDied","Data":"c0e64ec632e915a11ef7c7063dd20a855978bdc9a118eef1f47c3042281b5288"} Nov 25 20:00:04 crc kubenswrapper[4926]: I1125 20:00:04.348445 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0e64ec632e915a11ef7c7063dd20a855978bdc9a118eef1f47c3042281b5288" Nov 25 20:00:04 crc kubenswrapper[4926]: I1125 20:00:04.429999 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk"] Nov 25 20:00:04 crc kubenswrapper[4926]: I1125 20:00:04.444960 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401635-9ldmk"] Nov 25 20:00:06 crc kubenswrapper[4926]: I1125 20:00:06.347654 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46b88141-c093-48d6-9e52-6cd42570c8dc" path="/var/lib/kubelet/pods/46b88141-c093-48d6-9e52-6cd42570c8dc/volumes" Nov 25 20:00:09 crc kubenswrapper[4926]: I1125 20:00:09.329265 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:00:09 crc kubenswrapper[4926]: E1125 20:00:09.331620 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:00:24 crc kubenswrapper[4926]: I1125 20:00:24.329144 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:00:24 crc kubenswrapper[4926]: E1125 20:00:24.330291 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:00:25 crc kubenswrapper[4926]: I1125 20:00:25.900191 4926 scope.go:117] "RemoveContainer" containerID="4503e6ea8158be6fee693deb1669f39a466f9086706f77d7b19053e6f7c0dacd" Nov 25 20:00:37 crc kubenswrapper[4926]: I1125 20:00:37.330257 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:00:37 crc kubenswrapper[4926]: E1125 20:00:37.331138 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:00:50 crc kubenswrapper[4926]: I1125 
20:00:50.346674 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:00:50 crc kubenswrapper[4926]: E1125 20:00:50.347980 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.189350 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401681-7zh4v"] Nov 25 20:01:00 crc kubenswrapper[4926]: E1125 20:01:00.191016 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" containerName="collect-profiles" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.191052 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" containerName="collect-profiles" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.191566 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1db91ac3-4c19-4440-8aa1-42c1fc2e3b8b" containerName="collect-profiles" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.193942 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.224313 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401681-7zh4v"] Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.274963 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps2vm\" (UniqueName: \"kubernetes.io/projected/fd006279-ba5f-4a25-814a-17004757d8a3-kube-api-access-ps2vm\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.275089 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-config-data\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.275157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-fernet-keys\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.275229 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-combined-ca-bundle\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.378032 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-fernet-keys\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.378451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-combined-ca-bundle\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.378776 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps2vm\" (UniqueName: \"kubernetes.io/projected/fd006279-ba5f-4a25-814a-17004757d8a3-kube-api-access-ps2vm\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.379703 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-config-data\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.386299 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-combined-ca-bundle\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.392021 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-fernet-keys\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.395284 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-config-data\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.398240 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps2vm\" (UniqueName: \"kubernetes.io/projected/fd006279-ba5f-4a25-814a-17004757d8a3-kube-api-access-ps2vm\") pod \"keystone-cron-29401681-7zh4v\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:00 crc kubenswrapper[4926]: I1125 20:01:00.539472 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:01 crc kubenswrapper[4926]: I1125 20:01:01.035002 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401681-7zh4v"] Nov 25 20:01:01 crc kubenswrapper[4926]: I1125 20:01:01.108498 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401681-7zh4v" event={"ID":"fd006279-ba5f-4a25-814a-17004757d8a3","Type":"ContainerStarted","Data":"4ae4f04a0e28685a43c6c853c52ed6cbcf47adcdb90067e3480bc0c6581f5bd9"} Nov 25 20:01:02 crc kubenswrapper[4926]: I1125 20:01:02.121345 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401681-7zh4v" event={"ID":"fd006279-ba5f-4a25-814a-17004757d8a3","Type":"ContainerStarted","Data":"ddd4b3ae191f1865426a6450fefbace610e0b173e8a324352781ca2739c5fc25"} Nov 25 20:01:02 crc kubenswrapper[4926]: I1125 20:01:02.144625 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401681-7zh4v" podStartSLOduration=2.144604385 podStartE2EDuration="2.144604385s" podCreationTimestamp="2025-11-25 20:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 20:01:02.135469157 +0000 UTC m=+6492.520982812" watchObservedRunningTime="2025-11-25 20:01:02.144604385 +0000 UTC m=+6492.530118000" Nov 25 20:01:04 crc kubenswrapper[4926]: I1125 20:01:04.329941 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:01:04 crc kubenswrapper[4926]: E1125 20:01:04.330927 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:01:05 crc kubenswrapper[4926]: I1125 20:01:05.152921 4926 generic.go:334] "Generic (PLEG): container finished" podID="fd006279-ba5f-4a25-814a-17004757d8a3" containerID="ddd4b3ae191f1865426a6450fefbace610e0b173e8a324352781ca2739c5fc25" exitCode=0 Nov 25 20:01:05 crc kubenswrapper[4926]: I1125 20:01:05.152998 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401681-7zh4v" event={"ID":"fd006279-ba5f-4a25-814a-17004757d8a3","Type":"ContainerDied","Data":"ddd4b3ae191f1865426a6450fefbace610e0b173e8a324352781ca2739c5fc25"} Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.559300 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.712098 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-combined-ca-bundle\") pod \"fd006279-ba5f-4a25-814a-17004757d8a3\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.712492 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-fernet-keys\") pod \"fd006279-ba5f-4a25-814a-17004757d8a3\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.712704 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-config-data\") pod \"fd006279-ba5f-4a25-814a-17004757d8a3\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.712868 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps2vm\" (UniqueName: \"kubernetes.io/projected/fd006279-ba5f-4a25-814a-17004757d8a3-kube-api-access-ps2vm\") pod \"fd006279-ba5f-4a25-814a-17004757d8a3\" (UID: \"fd006279-ba5f-4a25-814a-17004757d8a3\") " Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.732641 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd006279-ba5f-4a25-814a-17004757d8a3-kube-api-access-ps2vm" (OuterVolumeSpecName: "kube-api-access-ps2vm") pod "fd006279-ba5f-4a25-814a-17004757d8a3" (UID: "fd006279-ba5f-4a25-814a-17004757d8a3"). InnerVolumeSpecName "kube-api-access-ps2vm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.734578 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fd006279-ba5f-4a25-814a-17004757d8a3" (UID: "fd006279-ba5f-4a25-814a-17004757d8a3"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.766166 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd006279-ba5f-4a25-814a-17004757d8a3" (UID: "fd006279-ba5f-4a25-814a-17004757d8a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.809899 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-config-data" (OuterVolumeSpecName: "config-data") pod "fd006279-ba5f-4a25-814a-17004757d8a3" (UID: "fd006279-ba5f-4a25-814a-17004757d8a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.815853 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.815891 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.815912 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps2vm\" (UniqueName: \"kubernetes.io/projected/fd006279-ba5f-4a25-814a-17004757d8a3-kube-api-access-ps2vm\") on node \"crc\" DevicePath \"\"" Nov 25 20:01:06 crc kubenswrapper[4926]: I1125 20:01:06.815933 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd006279-ba5f-4a25-814a-17004757d8a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 20:01:07 crc kubenswrapper[4926]: I1125 20:01:07.181537 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401681-7zh4v" event={"ID":"fd006279-ba5f-4a25-814a-17004757d8a3","Type":"ContainerDied","Data":"4ae4f04a0e28685a43c6c853c52ed6cbcf47adcdb90067e3480bc0c6581f5bd9"} Nov 25 20:01:07 crc kubenswrapper[4926]: I1125 20:01:07.182042 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ae4f04a0e28685a43c6c853c52ed6cbcf47adcdb90067e3480bc0c6581f5bd9" Nov 25 20:01:07 crc kubenswrapper[4926]: I1125 20:01:07.181654 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401681-7zh4v" Nov 25 20:01:16 crc kubenswrapper[4926]: I1125 20:01:16.330144 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:01:16 crc kubenswrapper[4926]: E1125 20:01:16.331350 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:01:30 crc kubenswrapper[4926]: I1125 20:01:30.337298 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:01:30 crc kubenswrapper[4926]: E1125 20:01:30.338288 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:01:42 crc kubenswrapper[4926]: I1125 20:01:42.330053 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:01:42 crc kubenswrapper[4926]: E1125 20:01:42.331037 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:01:57 crc kubenswrapper[4926]: I1125 20:01:57.329676 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:01:57 crc kubenswrapper[4926]: E1125 20:01:57.330997 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:02:09 crc kubenswrapper[4926]: I1125 20:02:09.330434 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:02:09 crc kubenswrapper[4926]: E1125 20:02:09.331773 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:02:20 crc kubenswrapper[4926]: I1125 20:02:20.343520 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:02:20 crc kubenswrapper[4926]: E1125 20:02:20.344791 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:02:33 crc kubenswrapper[4926]: I1125 20:02:33.330001 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:02:33 crc kubenswrapper[4926]: E1125 20:02:33.330922 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:02:46 crc kubenswrapper[4926]: I1125 20:02:46.329812 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:02:46 crc kubenswrapper[4926]: E1125 20:02:46.330626 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:02:59 crc kubenswrapper[4926]: I1125 20:02:59.329569 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:02:59 crc kubenswrapper[4926]: E1125 20:02:59.330656 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:03:11 crc kubenswrapper[4926]: I1125 20:03:11.330483 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:03:11 crc kubenswrapper[4926]: E1125 20:03:11.331752 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:03:26 crc kubenswrapper[4926]: I1125 20:03:26.330129 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:03:26 crc kubenswrapper[4926]: E1125 20:03:26.331146 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.329352 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:03:39 crc kubenswrapper[4926]: E1125 20:03:39.330281 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.572450 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6glvp"] Nov 25 20:03:39 crc kubenswrapper[4926]: E1125 20:03:39.573122 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd006279-ba5f-4a25-814a-17004757d8a3" containerName="keystone-cron" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.573153 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd006279-ba5f-4a25-814a-17004757d8a3" containerName="keystone-cron" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.573575 4926 
memory_manager.go:354] "RemoveStaleState removing state" podUID="fd006279-ba5f-4a25-814a-17004757d8a3" containerName="keystone-cron" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.582143 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.594348 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6glvp"] Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.721084 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-utilities\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.721228 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjm7l\" (UniqueName: \"kubernetes.io/projected/b96994f7-b460-428c-ab05-c168c4330bbf-kube-api-access-zjm7l\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.721296 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-catalog-content\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.823122 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-catalog-content\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.823232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-utilities\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.823495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjm7l\" (UniqueName: \"kubernetes.io/projected/b96994f7-b460-428c-ab05-c168c4330bbf-kube-api-access-zjm7l\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.823690 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-catalog-content\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.823717 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-utilities\") pod 
\"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.853400 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjm7l\" (UniqueName: \"kubernetes.io/projected/b96994f7-b460-428c-ab05-c168c4330bbf-kube-api-access-zjm7l\") pod \"redhat-marketplace-6glvp\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:39 crc kubenswrapper[4926]: I1125 20:03:39.912936 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:40 crc kubenswrapper[4926]: I1125 20:03:40.377879 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6glvp"] Nov 25 20:03:40 crc kubenswrapper[4926]: W1125 20:03:40.395810 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb96994f7_b460_428c_ab05_c168c4330bbf.slice/crio-da660d2c11ba65d04f4f415d102ef0c40bab80f82f53ef9d9d1ccfdb2b234b99 WatchSource:0}: Error finding container da660d2c11ba65d04f4f415d102ef0c40bab80f82f53ef9d9d1ccfdb2b234b99: Status 404 returned error can't find the container with id da660d2c11ba65d04f4f415d102ef0c40bab80f82f53ef9d9d1ccfdb2b234b99 Nov 25 20:03:41 crc kubenswrapper[4926]: I1125 20:03:41.166714 4926 generic.go:334] "Generic (PLEG): container finished" podID="b96994f7-b460-428c-ab05-c168c4330bbf" containerID="5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97" exitCode=0 Nov 25 20:03:41 crc kubenswrapper[4926]: I1125 20:03:41.166808 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6glvp" event={"ID":"b96994f7-b460-428c-ab05-c168c4330bbf","Type":"ContainerDied","Data":"5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97"} Nov 25 20:03:41 crc kubenswrapper[4926]: I1125 20:03:41.167027 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6glvp" event={"ID":"b96994f7-b460-428c-ab05-c168c4330bbf","Type":"ContainerStarted","Data":"da660d2c11ba65d04f4f415d102ef0c40bab80f82f53ef9d9d1ccfdb2b234b99"} Nov 25 20:03:41 crc kubenswrapper[4926]: I1125 20:03:41.170661 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 20:03:43 crc kubenswrapper[4926]: I1125 20:03:43.198614 4926 generic.go:334] "Generic (PLEG): container finished" podID="b96994f7-b460-428c-ab05-c168c4330bbf" containerID="d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066" exitCode=0 Nov 25 20:03:43 crc kubenswrapper[4926]: I1125 20:03:43.198723 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6glvp" event={"ID":"b96994f7-b460-428c-ab05-c168c4330bbf","Type":"ContainerDied","Data":"d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066"} Nov 25 20:03:44 crc kubenswrapper[4926]: I1125 20:03:44.216888 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6glvp" event={"ID":"b96994f7-b460-428c-ab05-c168c4330bbf","Type":"ContainerStarted","Data":"299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56"} Nov 25 20:03:44 crc kubenswrapper[4926]: I1125 20:03:44.248634 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-6glvp" podStartSLOduration=2.709790357 podStartE2EDuration="5.248611745s" podCreationTimestamp="2025-11-25 20:03:39 +0000 UTC" firstStartedPulling="2025-11-25 20:03:41.170153693 +0000 UTC m=+6651.555667338" lastFinishedPulling="2025-11-25 20:03:43.708975111 +0000 UTC m=+6654.094488726" observedRunningTime="2025-11-25 20:03:44.239506238 +0000 UTC m=+6654.625019853" watchObservedRunningTime="2025-11-25 20:03:44.248611745 +0000 UTC m=+6654.634125370" Nov 25 20:03:49 crc kubenswrapper[4926]: I1125 20:03:49.913055 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:49 crc kubenswrapper[4926]: I1125 20:03:49.913829 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:49 crc kubenswrapper[4926]: I1125 20:03:49.972521 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:50 crc kubenswrapper[4926]: I1125 20:03:50.361718 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:50 crc kubenswrapper[4926]: I1125 20:03:50.954093 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6glvp"] Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.312153 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6glvp" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="registry-server" containerID="cri-o://299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56" gracePeriod=2 Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.810569 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.820131 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-catalog-content\") pod \"b96994f7-b460-428c-ab05-c168c4330bbf\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.822276 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjm7l\" (UniqueName: \"kubernetes.io/projected/b96994f7-b460-428c-ab05-c168c4330bbf-kube-api-access-zjm7l\") pod \"b96994f7-b460-428c-ab05-c168c4330bbf\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.822514 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-utilities\") pod \"b96994f7-b460-428c-ab05-c168c4330bbf\" (UID: \"b96994f7-b460-428c-ab05-c168c4330bbf\") " Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.823680 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-utilities" (OuterVolumeSpecName: "utilities") pod "b96994f7-b460-428c-ab05-c168c4330bbf" (UID: "b96994f7-b460-428c-ab05-c168c4330bbf"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.828262 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b96994f7-b460-428c-ab05-c168c4330bbf-kube-api-access-zjm7l" (OuterVolumeSpecName: "kube-api-access-zjm7l") pod "b96994f7-b460-428c-ab05-c168c4330bbf" (UID: "b96994f7-b460-428c-ab05-c168c4330bbf"). InnerVolumeSpecName "kube-api-access-zjm7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.847295 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b96994f7-b460-428c-ab05-c168c4330bbf" (UID: "b96994f7-b460-428c-ab05-c168c4330bbf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.924527 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.924569 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96994f7-b460-428c-ab05-c168c4330bbf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 20:03:52 crc kubenswrapper[4926]: I1125 20:03:52.924582 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjm7l\" (UniqueName: \"kubernetes.io/projected/b96994f7-b460-428c-ab05-c168c4330bbf-kube-api-access-zjm7l\") on node \"crc\" DevicePath \"\"" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.329774 4926 generic.go:334] "Generic (PLEG): container finished" podID="b96994f7-b460-428c-ab05-c168c4330bbf" containerID="299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56" exitCode=0 Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.329840 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6glvp" event={"ID":"b96994f7-b460-428c-ab05-c168c4330bbf","Type":"ContainerDied","Data":"299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56"} Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.329892 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6glvp" event={"ID":"b96994f7-b460-428c-ab05-c168c4330bbf","Type":"ContainerDied","Data":"da660d2c11ba65d04f4f415d102ef0c40bab80f82f53ef9d9d1ccfdb2b234b99"} Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.329933 4926 scope.go:117] "RemoveContainer" containerID="299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.329958 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6glvp" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.370513 4926 scope.go:117] "RemoveContainer" containerID="d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.411759 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6glvp"] Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.414604 4926 scope.go:117] "RemoveContainer" containerID="5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.457916 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6glvp"] Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.495624 4926 scope.go:117] "RemoveContainer" containerID="299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56" Nov 25 20:03:53 crc kubenswrapper[4926]: E1125 20:03:53.496436 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56\": container with ID starting with 299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56 not found: ID does not exist" containerID="299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.496479 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56"} err="failed to get container status \"299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56\": rpc error: code = NotFound desc = could not find container \"299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56\": container with ID starting with 299d902a6a4e66602381d83d3e06a807e60984e547bc1a623e0d82da26f27a56 not found: ID does not exist" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.496503 4926 scope.go:117] "RemoveContainer" containerID="d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066" Nov 25 20:03:53 crc kubenswrapper[4926]: E1125 20:03:53.497243 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066\": container with ID starting with d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066 not found: ID does not exist" containerID="d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.497302 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066"} err="failed to get container status \"d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066\": rpc error: code = NotFound desc = could not find container \"d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066\": container with ID starting with d2e08bdc6b0d6ac654768f0a8e707e7ead825cb9287b4944923bc10704c6e066 not found: ID does not exist" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.497335 4926 scope.go:117] "RemoveContainer" containerID="5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97" Nov 25 20:03:53 crc kubenswrapper[4926]: E1125 20:03:53.497898 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97\": container with ID starting with 5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97 not found: ID does not exist" containerID="5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97" Nov 25 20:03:53 crc kubenswrapper[4926]: I1125 20:03:53.497952 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97"} err="failed to get container status \"5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97\": rpc error: code = NotFound desc = could not find container \"5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97\": container with ID starting with 5130e661807f33b387703c7bcaa58843072c56f461a1c9968c018e17d4dc1b97 not found: ID does not exist" Nov 25 20:03:54 crc kubenswrapper[4926]: I1125 20:03:54.329320 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:03:54 crc kubenswrapper[4926]: E1125 20:03:54.329865 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:03:54 crc kubenswrapper[4926]: I1125 20:03:54.346168 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" path="/var/lib/kubelet/pods/b96994f7-b460-428c-ab05-c168c4330bbf/volumes" Nov 25 20:04:08 crc kubenswrapper[4926]: I1125 20:04:08.331957 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:04:08 crc kubenswrapper[4926]: E1125 20:04:08.333004 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:04:20 crc kubenswrapper[4926]: I1125 20:04:20.345743 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:04:20 crc kubenswrapper[4926]: E1125 20:04:20.346442 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:04:32 crc kubenswrapper[4926]: I1125 20:04:32.330146 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:04:32 crc kubenswrapper[4926]: E1125 20:04:32.331331 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:04:47 crc kubenswrapper[4926]: I1125 20:04:47.329935 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf" Nov 25 20:04:47 crc kubenswrapper[4926]: I1125 20:04:47.981448 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"9733fdc26b40a269b4aaa32efd9446986c81dcc6e35bdb98fa7f158d1c5abba6"} Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.528092 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zngp6"] Nov 25 20:06:52 crc kubenswrapper[4926]: E1125 20:06:52.529545 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="extract-utilities" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.529568 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="extract-utilities" Nov 25 20:06:52 crc kubenswrapper[4926]: E1125 20:06:52.529631 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="registry-server" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.529643 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="registry-server" Nov 25 20:06:52 crc kubenswrapper[4926]: E1125 20:06:52.529719 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="extract-content" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.529733 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="extract-content" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.530360 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b96994f7-b460-428c-ab05-c168c4330bbf" containerName="registry-server" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.535780 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.554765 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zngp6"] Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.636803 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cw2m\" (UniqueName: \"kubernetes.io/projected/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-kube-api-access-9cw2m\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.636850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-catalog-content\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.637289 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-utilities\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.738514 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cw2m\" (UniqueName: \"kubernetes.io/projected/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-kube-api-access-9cw2m\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.738578 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-catalog-content\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.738684 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-utilities\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.739141 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-utilities\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.739734 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-catalog-content\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.770785 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9cw2m\" (UniqueName: \"kubernetes.io/projected/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-kube-api-access-9cw2m\") pod \"certified-operators-zngp6\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:52 crc kubenswrapper[4926]: I1125 20:06:52.868161 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:06:53 crc kubenswrapper[4926]: I1125 20:06:53.449292 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zngp6"] Nov 25 20:06:53 crc kubenswrapper[4926]: I1125 20:06:53.755602 4926 generic.go:334] "Generic (PLEG): container finished" podID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerID="a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908" exitCode=0 Nov 25 20:06:53 crc kubenswrapper[4926]: I1125 20:06:53.755676 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerDied","Data":"a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908"} Nov 25 20:06:53 crc kubenswrapper[4926]: I1125 20:06:53.756088 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerStarted","Data":"0bfc0a3df2c5767f5e95ffa133fce5e5ff4bda8f51a436e5422a25c4a9495d06"} Nov 25 20:06:54 crc kubenswrapper[4926]: I1125 20:06:54.767200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerStarted","Data":"234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309"} Nov 25 20:06:55 crc kubenswrapper[4926]: I1125 20:06:55.782586 4926 generic.go:334] "Generic (PLEG): container finished" podID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerID="234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309" exitCode=0 Nov 25 20:06:55 crc kubenswrapper[4926]: I1125 20:06:55.782781 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerDied","Data":"234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309"} Nov 25 20:06:56 crc kubenswrapper[4926]: I1125 20:06:56.794497 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerStarted","Data":"51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4"} Nov 25 20:06:56 crc kubenswrapper[4926]: I1125 20:06:56.818709 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zngp6" podStartSLOduration=2.379869762 podStartE2EDuration="4.818688671s" podCreationTimestamp="2025-11-25 20:06:52 +0000 UTC" firstStartedPulling="2025-11-25 20:06:53.757865197 +0000 UTC m=+6844.143378802" lastFinishedPulling="2025-11-25 20:06:56.196684086 +0000 UTC m=+6846.582197711" observedRunningTime="2025-11-25 20:06:56.811891309 +0000 UTC m=+6847.197404914" watchObservedRunningTime="2025-11-25 20:06:56.818688671 +0000 UTC m=+6847.204202286" Nov 25 20:07:02 crc kubenswrapper[4926]: I1125 20:07:02.868760 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:07:02 crc kubenswrapper[4926]: I1125 20:07:02.869335 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:07:02 crc kubenswrapper[4926]: I1125 20:07:02.934824 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:07:03 crc kubenswrapper[4926]: I1125 20:07:03.541924 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:07:03 crc kubenswrapper[4926]: I1125 20:07:03.542300 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:07:03 crc kubenswrapper[4926]: I1125 20:07:03.953968 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:07:04 crc kubenswrapper[4926]: I1125 20:07:04.022448 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zngp6"] Nov 25 20:07:05 crc kubenswrapper[4926]: I1125 20:07:05.915330 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zngp6" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="registry-server" containerID="cri-o://51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4" gracePeriod=2 Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.461113 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.559590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-utilities\") pod \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.560131 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-catalog-content\") pod \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.560174 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cw2m\" (UniqueName: \"kubernetes.io/projected/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-kube-api-access-9cw2m\") pod \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\" (UID: \"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f\") " Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.560687 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-utilities" (OuterVolumeSpecName: "utilities") pod "e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" (UID: "e6f29963-eb34-4d7b-a6c8-0f1bf773e22f"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.560808 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.566985 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-kube-api-access-9cw2m" (OuterVolumeSpecName: "kube-api-access-9cw2m") pod "e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" (UID: "e6f29963-eb34-4d7b-a6c8-0f1bf773e22f"). InnerVolumeSpecName "kube-api-access-9cw2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.608001 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" (UID: "e6f29963-eb34-4d7b-a6c8-0f1bf773e22f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.662904 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.662939 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cw2m\" (UniqueName: \"kubernetes.io/projected/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f-kube-api-access-9cw2m\") on node \"crc\" DevicePath \"\"" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.927940 4926 generic.go:334] "Generic (PLEG): container finished" podID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerID="51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4" exitCode=0 Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.928002 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerDied","Data":"51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4"} Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.928052 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zngp6" event={"ID":"e6f29963-eb34-4d7b-a6c8-0f1bf773e22f","Type":"ContainerDied","Data":"0bfc0a3df2c5767f5e95ffa133fce5e5ff4bda8f51a436e5422a25c4a9495d06"} Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.928088 4926 scope.go:117] "RemoveContainer" containerID="51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.928087 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zngp6" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.951719 4926 scope.go:117] "RemoveContainer" containerID="234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309" Nov 25 20:07:06 crc kubenswrapper[4926]: I1125 20:07:06.991191 4926 scope.go:117] "RemoveContainer" containerID="a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908" Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.007446 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zngp6"] Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.019996 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zngp6"] Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.048414 4926 scope.go:117] "RemoveContainer" containerID="51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4" Nov 25 20:07:07 crc kubenswrapper[4926]: E1125 20:07:07.049081 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4\": container with ID starting with 51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4 not found: ID does not exist" containerID="51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4" Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.049171 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4"} err="failed to get container status \"51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4\": rpc error: code = NotFound desc = could not find container \"51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4\": container with ID starting with 51603c2ed38bb819a8e1fb565c50768ea3c14f4fdfe68c00b8c2e866c18aafd4 not found: ID does not exist" Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.049219 4926 scope.go:117] "RemoveContainer" containerID="234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309" Nov 25 20:07:07 crc kubenswrapper[4926]: E1125 20:07:07.049755 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309\": container with ID starting with 234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309 not found: ID does not exist" containerID="234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309" Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.049799 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309"} err="failed to get container status \"234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309\": rpc error: code = NotFound desc = could not find container \"234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309\": container with ID starting with 234b02d89ce7359090cbba0a76774714506fc87ed864fef6b978b5f5a98e3309 not found: ID does not exist" Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.049826 4926 scope.go:117] "RemoveContainer" containerID="a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908" Nov 25 20:07:07 crc kubenswrapper[4926]: E1125 20:07:07.050193 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908\": container with ID starting with a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908 not found: ID does not exist" containerID="a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908" Nov 25 20:07:07 crc kubenswrapper[4926]: I1125 20:07:07.050233 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908"} err="failed to get container status \"a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908\": rpc error: code = NotFound desc = could not find container \"a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908\": container with ID starting with a40310a1d1bbeb5db7c61d543bef729bd15ec11bcd75c710b4386aec562ef908 not found: ID does not exist" Nov 25 20:07:08 crc kubenswrapper[4926]: I1125 20:07:08.340921 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" path="/var/lib/kubelet/pods/e6f29963-eb34-4d7b-a6c8-0f1bf773e22f/volumes" Nov 25 20:07:33 crc kubenswrapper[4926]: I1125 20:07:33.541766 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:07:33 crc kubenswrapper[4926]: I1125 20:07:33.542478 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:08:03 crc kubenswrapper[4926]: I1125 20:08:03.541156 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:08:03 crc kubenswrapper[4926]: I1125 20:08:03.541915 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:08:03 crc kubenswrapper[4926]: I1125 20:08:03.541988 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 20:08:03 crc kubenswrapper[4926]: I1125 20:08:03.543187 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9733fdc26b40a269b4aaa32efd9446986c81dcc6e35bdb98fa7f158d1c5abba6"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 20:08:03 crc kubenswrapper[4926]: I1125 20:08:03.543309 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" 
Nov 25 20:08:04 crc kubenswrapper[4926]: I1125 20:08:04.632487 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="9733fdc26b40a269b4aaa32efd9446986c81dcc6e35bdb98fa7f158d1c5abba6" exitCode=0
Nov 25 20:08:04 crc kubenswrapper[4926]: I1125 20:08:04.632538 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"9733fdc26b40a269b4aaa32efd9446986c81dcc6e35bdb98fa7f158d1c5abba6"}
Nov 25 20:08:04 crc kubenswrapper[4926]: I1125 20:08:04.633177 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad"}
Nov 25 20:08:04 crc kubenswrapper[4926]: I1125 20:08:04.633209 4926 scope.go:117] "RemoveContainer" containerID="48a539cff2af35b434b7a8827e5b0a5295bbe89fb6a2668ca8b6b1941ee91adf"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.614287 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qlf69"]
Nov 25 20:08:55 crc kubenswrapper[4926]: E1125 20:08:55.615548 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="registry-server"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.615574 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="registry-server"
Nov 25 20:08:55 crc kubenswrapper[4926]: E1125 20:08:55.615599 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="extract-content"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.615610 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="extract-content"
Nov 25 20:08:55 crc kubenswrapper[4926]: E1125 20:08:55.615671 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="extract-utilities"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.615683 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="extract-utilities"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.616051 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6f29963-eb34-4d7b-a6c8-0f1bf773e22f" containerName="registry-server"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.618500 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.628189 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qlf69"]
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.727976 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-utilities\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.728071 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-catalog-content\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.728224 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l26x\" (UniqueName: \"kubernetes.io/projected/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-kube-api-access-9l26x\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.830449 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l26x\" (UniqueName: \"kubernetes.io/projected/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-kube-api-access-9l26x\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.830599 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-utilities\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.830632 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-catalog-content\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.831179 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-utilities\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.831220 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-catalog-content\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.853421 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l26x\" (UniqueName: \"kubernetes.io/projected/5b2ed5fc-51b6-4b2a-abdc-312ac928d471-kube-api-access-9l26x\") pod \"community-operators-qlf69\" (UID: \"5b2ed5fc-51b6-4b2a-abdc-312ac928d471\") " pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:55 crc kubenswrapper[4926]: I1125 20:08:55.943846 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:08:56 crc kubenswrapper[4926]: I1125 20:08:56.554541 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qlf69"]
Nov 25 20:08:57 crc kubenswrapper[4926]: I1125 20:08:57.281167 4926 generic.go:334] "Generic (PLEG): container finished" podID="5b2ed5fc-51b6-4b2a-abdc-312ac928d471" containerID="2a23b18031f1f36f462fcdfeb76bed4617cfbbfd8f19a0073f3da5f0efecd64e" exitCode=0
Nov 25 20:08:57 crc kubenswrapper[4926]: I1125 20:08:57.281263 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qlf69" event={"ID":"5b2ed5fc-51b6-4b2a-abdc-312ac928d471","Type":"ContainerDied","Data":"2a23b18031f1f36f462fcdfeb76bed4617cfbbfd8f19a0073f3da5f0efecd64e"}
Nov 25 20:08:57 crc kubenswrapper[4926]: I1125 20:08:57.281808 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qlf69" event={"ID":"5b2ed5fc-51b6-4b2a-abdc-312ac928d471","Type":"ContainerStarted","Data":"e8dbb96a6587f03cea753eabba42a97bc9c555b06ad70755b7d0df1b3eb2aaa8"}
Nov 25 20:08:57 crc kubenswrapper[4926]: I1125 20:08:57.284156 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 20:09:02 crc kubenswrapper[4926]: I1125 20:09:02.361247 4926 generic.go:334] "Generic (PLEG): container finished" podID="5b2ed5fc-51b6-4b2a-abdc-312ac928d471" containerID="bd9f63ecca0e0c6846313aad524a29c221721df84bd47a5c51d8a990336c68bc" exitCode=0
Nov 25 20:09:02 crc kubenswrapper[4926]: I1125 20:09:02.364062 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qlf69" event={"ID":"5b2ed5fc-51b6-4b2a-abdc-312ac928d471","Type":"ContainerDied","Data":"bd9f63ecca0e0c6846313aad524a29c221721df84bd47a5c51d8a990336c68bc"}
Nov 25 20:09:03 crc kubenswrapper[4926]: I1125 20:09:03.384654 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qlf69" event={"ID":"5b2ed5fc-51b6-4b2a-abdc-312ac928d471","Type":"ContainerStarted","Data":"9f3d21df9cd55b34d0f20bdac0957b313023bd7b9acd2c209d883ffd37c3144c"}
Nov 25 20:09:03 crc kubenswrapper[4926]: I1125 20:09:03.419786 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qlf69" podStartSLOduration=2.889641015 podStartE2EDuration="8.419758047s" podCreationTimestamp="2025-11-25 20:08:55 +0000 UTC" firstStartedPulling="2025-11-25 20:08:57.283850696 +0000 UTC m=+6967.669364321" lastFinishedPulling="2025-11-25 20:09:02.813967758 +0000 UTC m=+6973.199481353" observedRunningTime="2025-11-25 20:09:03.408977766 +0000 UTC m=+6973.794491381" watchObservedRunningTime="2025-11-25 20:09:03.419758047 +0000 UTC m=+6973.805271682"
Nov 25 20:09:05 crc kubenswrapper[4926]: I1125 20:09:05.944552 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:09:05 crc kubenswrapper[4926]: I1125 20:09:05.945307 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:09:06 crc kubenswrapper[4926]: I1125 20:09:06.065773 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.041193 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qlf69"
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.127603 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qlf69"]
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.181650 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.181928 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wjsww" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="registry-server" containerID="cri-o://fba00f4dadb2d3eea5da78a6012aaa91948eb7947d95fd7e9d0052eb9945f24a" gracePeriod=2
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.558851 4926 generic.go:334] "Generic (PLEG): container finished" podID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerID="fba00f4dadb2d3eea5da78a6012aaa91948eb7947d95fd7e9d0052eb9945f24a" exitCode=0
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.558901 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerDied","Data":"fba00f4dadb2d3eea5da78a6012aaa91948eb7947d95fd7e9d0052eb9945f24a"}
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.810792 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.858648 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-catalog-content\") pod \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") "
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.858703 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-utilities\") pod \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") "
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.858797 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhzwn\" (UniqueName: \"kubernetes.io/projected/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-kube-api-access-jhzwn\") pod \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\" (UID: \"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea\") "
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.859484 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-utilities" (OuterVolumeSpecName: "utilities") pod "a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" (UID: "a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.874634 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-kube-api-access-jhzwn" (OuterVolumeSpecName: "kube-api-access-jhzwn") pod "a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" (UID: "a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea"). InnerVolumeSpecName "kube-api-access-jhzwn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.934894 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" (UID: "a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.961312 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.961348 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 20:09:16 crc kubenswrapper[4926]: I1125 20:09:16.961358 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhzwn\" (UniqueName: \"kubernetes.io/projected/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea-kube-api-access-jhzwn\") on node \"crc\" DevicePath \"\""
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.573423 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wjsww" event={"ID":"a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea","Type":"ContainerDied","Data":"647ce0d7ed7f0f6be2a0070b0412c8c9fffdcb811d314419debc2d2ab6dab1c9"}
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.573498 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wjsww"
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.573520 4926 scope.go:117] "RemoveContainer" containerID="fba00f4dadb2d3eea5da78a6012aaa91948eb7947d95fd7e9d0052eb9945f24a"
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.603951 4926 scope.go:117] "RemoveContainer" containerID="4817cd05b8c86d4b338f39b7314e82234eb9432ff067c2c214254e8592bcd445"
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.626973 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.633418 4926 scope.go:117] "RemoveContainer" containerID="3a5d5b5ddb38055cca8e3f3c8f6d5cd41fcc096758dbb9f0a1896ee142918c4a"
Nov 25 20:09:17 crc kubenswrapper[4926]: I1125 20:09:17.638907 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wjsww"]
Nov 25 20:09:18 crc kubenswrapper[4926]: I1125 20:09:18.341451 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" path="/var/lib/kubelet/pods/a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea/volumes"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.550640 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rtnrl/must-gather-sdwwr"]
Nov 25 20:09:47 crc kubenswrapper[4926]: E1125 20:09:47.551409 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="extract-content"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.551419 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="extract-content"
Nov 25 20:09:47 crc kubenswrapper[4926]: E1125 20:09:47.551445 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="registry-server"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.551454 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="registry-server"
Nov 25 20:09:47 crc kubenswrapper[4926]: E1125 20:09:47.551472 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="extract-utilities"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.551478 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="extract-utilities"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.551684 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a505186f-3d1f-42e8-8bf4-c8abcc1ec8ea" containerName="registry-server"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.552713 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.554400 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rtnrl"/"kube-root-ca.crt"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.554972 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rtnrl"/"openshift-service-ca.crt"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.556622 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rtnrl"/"default-dockercfg-5mbl7"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.562240 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rtnrl/must-gather-sdwwr"]
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.647706 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-must-gather-output\") pod \"must-gather-sdwwr\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.647767 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptgbd\" (UniqueName: \"kubernetes.io/projected/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-kube-api-access-ptgbd\") pod \"must-gather-sdwwr\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.749997 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-must-gather-output\") pod \"must-gather-sdwwr\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.750053 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptgbd\" (UniqueName: \"kubernetes.io/projected/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-kube-api-access-ptgbd\") pod \"must-gather-sdwwr\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.750464 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-must-gather-output\") pod \"must-gather-sdwwr\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.775027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptgbd\" (UniqueName: \"kubernetes.io/projected/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-kube-api-access-ptgbd\") pod \"must-gather-sdwwr\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:47 crc kubenswrapper[4926]: I1125 20:09:47.870007 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/must-gather-sdwwr"
Nov 25 20:09:48 crc kubenswrapper[4926]: I1125 20:09:48.384725 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rtnrl/must-gather-sdwwr"]
Nov 25 20:09:48 crc kubenswrapper[4926]: I1125 20:09:48.981696 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" event={"ID":"a198c3ed-d652-4d90-87c1-ac0f117fd1f7","Type":"ContainerStarted","Data":"247a809baf62e992fc4f902623798852151276877ae32034fb7b697be126cde2"}
Nov 25 20:09:56 crc kubenswrapper[4926]: I1125 20:09:56.077338 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" event={"ID":"a198c3ed-d652-4d90-87c1-ac0f117fd1f7","Type":"ContainerStarted","Data":"79c08244b69a86eef6e95722d70c511ae8cb51386790a43cd47b3ba9af23af20"}
Nov 25 20:09:57 crc kubenswrapper[4926]: I1125 20:09:57.093682 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" event={"ID":"a198c3ed-d652-4d90-87c1-ac0f117fd1f7","Type":"ContainerStarted","Data":"17965182a11249e6e4fc4c2080ba16d37afee597ff84ff2923f4df5ce92e7cb5"}
Nov 25 20:09:57 crc kubenswrapper[4926]: I1125 20:09:57.121463 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" podStartSLOduration=2.728766591 podStartE2EDuration="10.121436916s" podCreationTimestamp="2025-11-25 20:09:47 +0000 UTC" firstStartedPulling="2025-11-25 20:09:48.390605234 +0000 UTC m=+7018.776118849" lastFinishedPulling="2025-11-25 20:09:55.783275569 +0000 UTC m=+7026.168789174" observedRunningTime="2025-11-25 20:09:57.115943678 +0000 UTC m=+7027.501457293" watchObservedRunningTime="2025-11-25 20:09:57.121436916 +0000 UTC m=+7027.506950561"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.071028 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-glphq"]
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.072742 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.224141 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d3438ae5-91c3-493b-a262-0d383d481850-host\") pod \"crc-debug-glphq\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") " pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.224364 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b58cq\" (UniqueName: \"kubernetes.io/projected/d3438ae5-91c3-493b-a262-0d383d481850-kube-api-access-b58cq\") pod \"crc-debug-glphq\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") " pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.327017 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d3438ae5-91c3-493b-a262-0d383d481850-host\") pod \"crc-debug-glphq\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") " pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.327107 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d3438ae5-91c3-493b-a262-0d383d481850-host\") pod \"crc-debug-glphq\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") " pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.327286 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b58cq\" (UniqueName: \"kubernetes.io/projected/d3438ae5-91c3-493b-a262-0d383d481850-kube-api-access-b58cq\") pod \"crc-debug-glphq\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") " pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.371906 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b58cq\" (UniqueName: \"kubernetes.io/projected/d3438ae5-91c3-493b-a262-0d383d481850-kube-api-access-b58cq\") pod \"crc-debug-glphq\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") " pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:00 crc kubenswrapper[4926]: I1125 20:10:00.392916 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:10:01 crc kubenswrapper[4926]: I1125 20:10:01.136863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-glphq" event={"ID":"d3438ae5-91c3-493b-a262-0d383d481850","Type":"ContainerStarted","Data":"edcd7e004ee8733ffac12aac2aa2c7e6fcacf3d228eb104a20ae47b7c3312768"}
Nov 25 20:10:03 crc kubenswrapper[4926]: I1125 20:10:03.541635 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 20:10:03 crc kubenswrapper[4926]: I1125 20:10:03.541923 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 20:10:11 crc kubenswrapper[4926]: I1125 20:10:11.265815 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-glphq" event={"ID":"d3438ae5-91c3-493b-a262-0d383d481850","Type":"ContainerStarted","Data":"09baec705472dcacfefe30bcad02d075f251c1c19e904f65fb4b1f1f2a44c669"}
Nov 25 20:10:11 crc kubenswrapper[4926]: I1125 20:10:11.292520 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rtnrl/crc-debug-glphq" podStartSLOduration=0.873291234 podStartE2EDuration="11.2925026s" podCreationTimestamp="2025-11-25 20:10:00 +0000 UTC" firstStartedPulling="2025-11-25 20:10:00.433532944 +0000 UTC m=+7030.819046549" lastFinishedPulling="2025-11-25 20:10:10.85274431 +0000 UTC m=+7041.238257915" observedRunningTime="2025-11-25 20:10:11.285054799 +0000 UTC m=+7041.670568404" watchObservedRunningTime="2025-11-25 20:10:11.2925026 +0000 UTC m=+7041.678016205"
Nov 25 20:10:33 crc kubenswrapper[4926]: I1125 20:10:33.541071 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 20:10:33 crc kubenswrapper[4926]: I1125 20:10:33.541596 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 20:11:00 crc kubenswrapper[4926]: I1125 20:11:00.770277 4926 generic.go:334] "Generic (PLEG): container finished" podID="d3438ae5-91c3-493b-a262-0d383d481850" containerID="09baec705472dcacfefe30bcad02d075f251c1c19e904f65fb4b1f1f2a44c669" exitCode=0
Nov 25 20:11:00 crc kubenswrapper[4926]: I1125 20:11:00.770861 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-glphq" event={"ID":"d3438ae5-91c3-493b-a262-0d383d481850","Type":"ContainerDied","Data":"09baec705472dcacfefe30bcad02d075f251c1c19e904f65fb4b1f1f2a44c669"}
Nov 25 20:11:01 crc kubenswrapper[4926]: I1125 20:11:01.912780 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:11:01 crc kubenswrapper[4926]: I1125 20:11:01.948472 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-glphq"]
Nov 25 20:11:01 crc kubenswrapper[4926]: I1125 20:11:01.960252 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-glphq"]
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.067066 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d3438ae5-91c3-493b-a262-0d383d481850-host\") pod \"d3438ae5-91c3-493b-a262-0d383d481850\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") "
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.067205 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3438ae5-91c3-493b-a262-0d383d481850-host" (OuterVolumeSpecName: "host") pod "d3438ae5-91c3-493b-a262-0d383d481850" (UID: "d3438ae5-91c3-493b-a262-0d383d481850"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.067276 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b58cq\" (UniqueName: \"kubernetes.io/projected/d3438ae5-91c3-493b-a262-0d383d481850-kube-api-access-b58cq\") pod \"d3438ae5-91c3-493b-a262-0d383d481850\" (UID: \"d3438ae5-91c3-493b-a262-0d383d481850\") "
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.067718 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d3438ae5-91c3-493b-a262-0d383d481850-host\") on node \"crc\" DevicePath \"\""
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.073342 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3438ae5-91c3-493b-a262-0d383d481850-kube-api-access-b58cq" (OuterVolumeSpecName: "kube-api-access-b58cq") pod "d3438ae5-91c3-493b-a262-0d383d481850" (UID: "d3438ae5-91c3-493b-a262-0d383d481850"). InnerVolumeSpecName "kube-api-access-b58cq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.169194 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b58cq\" (UniqueName: \"kubernetes.io/projected/d3438ae5-91c3-493b-a262-0d383d481850-kube-api-access-b58cq\") on node \"crc\" DevicePath \"\""
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.340528 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3438ae5-91c3-493b-a262-0d383d481850" path="/var/lib/kubelet/pods/d3438ae5-91c3-493b-a262-0d383d481850/volumes"
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.795322 4926 scope.go:117] "RemoveContainer" containerID="09baec705472dcacfefe30bcad02d075f251c1c19e904f65fb4b1f1f2a44c669"
Nov 25 20:11:02 crc kubenswrapper[4926]: I1125 20:11:02.795712 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-glphq"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.170520 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-6x9fk"]
Nov 25 20:11:03 crc kubenswrapper[4926]: E1125 20:11:03.171328 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3438ae5-91c3-493b-a262-0d383d481850" containerName="container-00"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.171351 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3438ae5-91c3-493b-a262-0d383d481850" containerName="container-00"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.171825 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3438ae5-91c3-493b-a262-0d383d481850" containerName="container-00"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.172958 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.289894 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3575891-6af8-4778-bd84-0e4e51558256-host\") pod \"crc-debug-6x9fk\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") " pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.290302 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqd57\" (UniqueName: \"kubernetes.io/projected/e3575891-6af8-4778-bd84-0e4e51558256-kube-api-access-qqd57\") pod \"crc-debug-6x9fk\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") " pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.393486 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3575891-6af8-4778-bd84-0e4e51558256-host\") pod \"crc-debug-6x9fk\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") " pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.393880 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqd57\" (UniqueName: \"kubernetes.io/projected/e3575891-6af8-4778-bd84-0e4e51558256-kube-api-access-qqd57\") pod \"crc-debug-6x9fk\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") " pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.393683 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3575891-6af8-4778-bd84-0e4e51558256-host\") pod \"crc-debug-6x9fk\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") " pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.415988 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqd57\" (UniqueName: \"kubernetes.io/projected/e3575891-6af8-4778-bd84-0e4e51558256-kube-api-access-qqd57\") pod \"crc-debug-6x9fk\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") " pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.499081 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.541638 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.541706 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.541754 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.542747 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.542814 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" gracePeriod=600
Nov 25 20:11:03 crc kubenswrapper[4926]: E1125 20:11:03.668790 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.816309 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk" event={"ID":"e3575891-6af8-4778-bd84-0e4e51558256","Type":"ContainerStarted","Data":"830a778e06ad4b4222b7b21a758d61d9d8dc5f8cbe77971b00babdcf190da67c"}
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.818650 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" exitCode=0
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.818692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad"}
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.818726 4926 scope.go:117] "RemoveContainer" containerID="9733fdc26b40a269b4aaa32efd9446986c81dcc6e35bdb98fa7f158d1c5abba6"
Nov 25 20:11:03 crc kubenswrapper[4926]: I1125 20:11:03.819256 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad"
Nov 25 20:11:03 crc kubenswrapper[4926]: E1125 20:11:03.819502 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 20:11:04 crc kubenswrapper[4926]: I1125 20:11:04.843679 4926 generic.go:334] "Generic (PLEG): container finished" podID="e3575891-6af8-4778-bd84-0e4e51558256" containerID="211842072b5668367f74c52fb007cba637ce15c26b3d3134ec551350e7fbc409" exitCode=0
Nov 25 20:11:04 crc kubenswrapper[4926]: I1125 20:11:04.843786 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk" event={"ID":"e3575891-6af8-4778-bd84-0e4e51558256","Type":"ContainerDied","Data":"211842072b5668367f74c52fb007cba637ce15c26b3d3134ec551350e7fbc409"}
Nov 25 20:11:05 crc kubenswrapper[4926]: I1125 20:11:05.969704 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.162352 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3575891-6af8-4778-bd84-0e4e51558256-host\") pod \"e3575891-6af8-4778-bd84-0e4e51558256\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") "
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.162580 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqd57\" (UniqueName: \"kubernetes.io/projected/e3575891-6af8-4778-bd84-0e4e51558256-kube-api-access-qqd57\") pod \"e3575891-6af8-4778-bd84-0e4e51558256\" (UID: \"e3575891-6af8-4778-bd84-0e4e51558256\") "
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.162641 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e3575891-6af8-4778-bd84-0e4e51558256-host" (OuterVolumeSpecName: "host") pod "e3575891-6af8-4778-bd84-0e4e51558256" (UID: "e3575891-6af8-4778-bd84-0e4e51558256"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.163851 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3575891-6af8-4778-bd84-0e4e51558256-host\") on node \"crc\" DevicePath \"\""
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.168405 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3575891-6af8-4778-bd84-0e4e51558256-kube-api-access-qqd57" (OuterVolumeSpecName: "kube-api-access-qqd57") pod "e3575891-6af8-4778-bd84-0e4e51558256" (UID: "e3575891-6af8-4778-bd84-0e4e51558256"). InnerVolumeSpecName "kube-api-access-qqd57". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.265183 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqd57\" (UniqueName: \"kubernetes.io/projected/e3575891-6af8-4778-bd84-0e4e51558256-kube-api-access-qqd57\") on node \"crc\" DevicePath \"\""
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.863787 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk" event={"ID":"e3575891-6af8-4778-bd84-0e4e51558256","Type":"ContainerDied","Data":"830a778e06ad4b4222b7b21a758d61d9d8dc5f8cbe77971b00babdcf190da67c"}
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.864109 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="830a778e06ad4b4222b7b21a758d61d9d8dc5f8cbe77971b00babdcf190da67c"
Nov 25 20:11:06 crc kubenswrapper[4926]: I1125 20:11:06.863851 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-6x9fk"
Nov 25 20:11:07 crc kubenswrapper[4926]: I1125 20:11:07.320405 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-6x9fk"]
Nov 25 20:11:07 crc kubenswrapper[4926]: I1125 20:11:07.329416 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-6x9fk"]
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.346131 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3575891-6af8-4778-bd84-0e4e51558256" path="/var/lib/kubelet/pods/e3575891-6af8-4778-bd84-0e4e51558256/volumes"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.532606 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-df6ml"]
Nov 25 20:11:08 crc kubenswrapper[4926]: E1125 20:11:08.533040 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3575891-6af8-4778-bd84-0e4e51558256" containerName="container-00"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.533058 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3575891-6af8-4778-bd84-0e4e51558256" containerName="container-00"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.533254 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3575891-6af8-4778-bd84-0e4e51558256" containerName="container-00"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.533965 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.711981 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78e25de7-1efb-4c83-be89-c9fe6cb7727b-host\") pod \"crc-debug-df6ml\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") " pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.712086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t4wl\" (UniqueName: \"kubernetes.io/projected/78e25de7-1efb-4c83-be89-c9fe6cb7727b-kube-api-access-7t4wl\") pod \"crc-debug-df6ml\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") " pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.814568 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78e25de7-1efb-4c83-be89-c9fe6cb7727b-host\") pod \"crc-debug-df6ml\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") " pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.814679 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t4wl\" (UniqueName: \"kubernetes.io/projected/78e25de7-1efb-4c83-be89-c9fe6cb7727b-kube-api-access-7t4wl\") pod \"crc-debug-df6ml\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") " pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.814711 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78e25de7-1efb-4c83-be89-c9fe6cb7727b-host\") pod \"crc-debug-df6ml\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") " pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.834361 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t4wl\" (UniqueName: \"kubernetes.io/projected/78e25de7-1efb-4c83-be89-c9fe6cb7727b-kube-api-access-7t4wl\") pod \"crc-debug-df6ml\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") " pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:08 crc kubenswrapper[4926]: I1125 20:11:08.857236 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:09 crc kubenswrapper[4926]: I1125 20:11:09.911834 4926 generic.go:334] "Generic (PLEG): container finished" podID="78e25de7-1efb-4c83-be89-c9fe6cb7727b" containerID="79c696313d5edae22d99031bcd1cf0beaf12b61f8b46579e4432abd2567b27ac" exitCode=0
Nov 25 20:11:09 crc kubenswrapper[4926]: I1125 20:11:09.911925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-df6ml" event={"ID":"78e25de7-1efb-4c83-be89-c9fe6cb7727b","Type":"ContainerDied","Data":"79c696313d5edae22d99031bcd1cf0beaf12b61f8b46579e4432abd2567b27ac"}
Nov 25 20:11:09 crc kubenswrapper[4926]: I1125 20:11:09.912449 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/crc-debug-df6ml" event={"ID":"78e25de7-1efb-4c83-be89-c9fe6cb7727b","Type":"ContainerStarted","Data":"2eb587cb532d60a25055022c9995a468e80ca25811c0c9a18266ebfb800cf99a"}
Nov 25 20:11:09 crc kubenswrapper[4926]: I1125 20:11:09.968393 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-df6ml"]
Nov 25 20:11:09 crc kubenswrapper[4926]: I1125 20:11:09.980897 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rtnrl/crc-debug-df6ml"]
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.022651 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.163961 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7t4wl\" (UniqueName: \"kubernetes.io/projected/78e25de7-1efb-4c83-be89-c9fe6cb7727b-kube-api-access-7t4wl\") pod \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") "
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.164143 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78e25de7-1efb-4c83-be89-c9fe6cb7727b-host\") pod \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\" (UID: \"78e25de7-1efb-4c83-be89-c9fe6cb7727b\") "
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.164300 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78e25de7-1efb-4c83-be89-c9fe6cb7727b-host" (OuterVolumeSpecName: "host") pod "78e25de7-1efb-4c83-be89-c9fe6cb7727b" (UID: "78e25de7-1efb-4c83-be89-c9fe6cb7727b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.165542 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78e25de7-1efb-4c83-be89-c9fe6cb7727b-host\") on node \"crc\" DevicePath \"\""
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.180645 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78e25de7-1efb-4c83-be89-c9fe6cb7727b-kube-api-access-7t4wl" (OuterVolumeSpecName: "kube-api-access-7t4wl") pod "78e25de7-1efb-4c83-be89-c9fe6cb7727b" (UID: "78e25de7-1efb-4c83-be89-c9fe6cb7727b"). InnerVolumeSpecName "kube-api-access-7t4wl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.267907 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t4wl\" (UniqueName: \"kubernetes.io/projected/78e25de7-1efb-4c83-be89-c9fe6cb7727b-kube-api-access-7t4wl\") on node \"crc\" DevicePath \"\""
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.932571 4926 scope.go:117] "RemoveContainer" containerID="79c696313d5edae22d99031bcd1cf0beaf12b61f8b46579e4432abd2567b27ac"
Nov 25 20:11:11 crc kubenswrapper[4926]: I1125 20:11:11.932594 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/crc-debug-df6ml"
Nov 25 20:11:12 crc kubenswrapper[4926]: I1125 20:11:12.342287 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78e25de7-1efb-4c83-be89-c9fe6cb7727b" path="/var/lib/kubelet/pods/78e25de7-1efb-4c83-be89-c9fe6cb7727b/volumes"
Nov 25 20:11:18 crc kubenswrapper[4926]: I1125 20:11:18.329291 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad"
Nov 25 20:11:18 crc kubenswrapper[4926]: E1125 20:11:18.330084 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 20:11:31 crc kubenswrapper[4926]: I1125 20:11:31.329483 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad"
Nov 25 20:11:31 crc kubenswrapper[4926]: E1125 20:11:31.330294 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"
Nov 25 20:11:37 crc kubenswrapper[4926]: I1125 20:11:37.439880 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5f9c5dcd5b-mpcsb_07453d33-7ce0-41da-bfbe-f496f1035621/barbican-api/0.log"
Nov 25 20:11:37 crc kubenswrapper[4926]: I1125 20:11:37.645719 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5f9c5dcd5b-mpcsb_07453d33-7ce0-41da-bfbe-f496f1035621/barbican-api-log/0.log"
Nov 25 20:11:37 crc kubenswrapper[4926]: I1125 20:11:37.738937 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5dc4bc78cd-2jtj7_82906e7a-0022-49ce-8cf9-10366d783d5e/barbican-keystone-listener/0.log"
Nov 25 20:11:37 crc kubenswrapper[4926]: I1125 20:11:37.778889 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5dc4bc78cd-2jtj7_82906e7a-0022-49ce-8cf9-10366d783d5e/barbican-keystone-listener-log/0.log"
Nov 25 20:11:37 crc kubenswrapper[4926]: I1125 20:11:37.905472 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5c446c68b5-c59jz_14c84d99-1576-439c-86b0-bc90f22a286f/barbican-worker/0.log"
Nov 25 20:11:37 crc kubenswrapper[4926]: I1125 20:11:37.955629 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5c446c68b5-c59jz_14c84d99-1576-439c-86b0-bc90f22a286f/barbican-worker-log/0.log"
20:11:37.955629 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5c446c68b5-c59jz_14c84d99-1576-439c-86b0-bc90f22a286f/barbican-worker-log/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.131514 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-2j9k9_c38a7543-0881-45e4-b1b3-1c515379526a/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.227959 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9803cc53-b277-4aec-af80-cab4b4638a02/ceilometer-central-agent/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.303095 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9803cc53-b277-4aec-af80-cab4b4638a02/ceilometer-notification-agent/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.369256 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9803cc53-b277-4aec-af80-cab4b4638a02/sg-core/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.402972 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_9803cc53-b277-4aec-af80-cab4b4638a02/proxy-httpd/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.595885 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ad60f4ee-1959-48b8-9ac6-ba95313bd024/cinder-api-log/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.763333 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_7d6bbb74-3796-44c9-a153-84fd8de6f338/cinder-backup/1.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.770877 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ad60f4ee-1959-48b8-9ac6-ba95313bd024/cinder-api/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.856174 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_7d6bbb74-3796-44c9-a153-84fd8de6f338/cinder-backup/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.896718 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_7d6bbb74-3796-44c9-a153-84fd8de6f338/probe/0.log" Nov 25 20:11:38 crc kubenswrapper[4926]: I1125 20:11:38.987770 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_ad6a0baa-57a5-47d8-81fc-4395a6f4079a/cinder-scheduler/1.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.057447 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_ad6a0baa-57a5-47d8-81fc-4395a6f4079a/cinder-scheduler/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.143507 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_ad6a0baa-57a5-47d8-81fc-4395a6f4079a/probe/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.234385 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_899c3fbb-eb5b-46b6-b535-27d400c4b40e/cinder-volume/1.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.281037 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_899c3fbb-eb5b-46b6-b535-27d400c4b40e/cinder-volume/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.387040 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-volume-nfs-0_899c3fbb-eb5b-46b6-b535-27d400c4b40e/probe/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.472147 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_7a824532-6df2-4a8d-a6ae-1859686e6bb5/cinder-volume/1.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.628273 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_7a824532-6df2-4a8d-a6ae-1859686e6bb5/cinder-volume/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.651563 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_7a824532-6df2-4a8d-a6ae-1859686e6bb5/probe/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.737501 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-n6n6s_aeee2488-6e27-4b22-aa01-182c3c7429fe/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.879585 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-2l97d_d2105a62-0fbe-4a45-94bf-81b59115a28b/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:39 crc kubenswrapper[4926]: I1125 20:11:39.947279 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-86b86f76c-cbrkb_3abe3008-a4ac-4efe-b063-0234232afac3/init/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.190908 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-86b86f76c-cbrkb_3abe3008-a4ac-4efe-b063-0234232afac3/init/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.265635 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-dk7fv_aff27f8d-6ea3-4441-b1a2-f9a87fdaab13/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.299618 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-86b86f76c-cbrkb_3abe3008-a4ac-4efe-b063-0234232afac3/dnsmasq-dns/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.481358 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_115acdd7-4a4c-420d-9dea-6e821cbf8bc9/glance-httpd/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.486186 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_115acdd7-4a4c-420d-9dea-6e821cbf8bc9/glance-log/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.657981 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_f1d50ae6-b28d-4604-94dd-81987f5b63fa/glance-log/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.682406 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_f1d50ae6-b28d-4604-94dd-81987f5b63fa/glance-httpd/0.log" Nov 25 20:11:40 crc kubenswrapper[4926]: I1125 20:11:40.852305 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-58ffdb7978-lnv9j_f0edd267-7b26-44cc-a576-552e8ff49e66/horizon/0.log" Nov 25 20:11:41 crc kubenswrapper[4926]: I1125 20:11:41.004029 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-fxxsx_b2103eff-585d-4c8b-a486-930c76be6884/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:41 crc kubenswrapper[4926]: I1125 20:11:41.139973 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-vm4lt_9f41d9b7-a000-45f2-8132-79eea20295bf/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:41 crc kubenswrapper[4926]: I1125 20:11:41.420951 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401621-zm5n9_e2a52929-6ede-453c-a04b-cbe357ca6476/keystone-cron/0.log" Nov 25 20:11:41 crc kubenswrapper[4926]: I1125 20:11:41.759388 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401681-7zh4v_fd006279-ba5f-4a25-814a-17004757d8a3/keystone-cron/0.log" Nov 25 20:11:41 crc kubenswrapper[4926]: I1125 20:11:41.800681 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1/kube-state-metrics/3.log" Nov 25 20:11:41 crc kubenswrapper[4926]: I1125 20:11:41.941678 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-58ffdb7978-lnv9j_f0edd267-7b26-44cc-a576-552e8ff49e66/horizon-log/0.log" Nov 25 20:11:42 crc kubenswrapper[4926]: I1125 20:11:42.031349 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_e3d61c67-f446-4b4b-a9fb-9e62d24c7cb1/kube-state-metrics/2.log" Nov 25 20:11:42 crc kubenswrapper[4926]: I1125 20:11:42.044950 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-99d7m_3967079a-4360-41e8-85e9-97c74ace2a79/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:42 crc kubenswrapper[4926]: I1125 20:11:42.637507 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-87c6cbb57-5vxgs_6e439ef2-db64-41e2-853a-a16e48f1607d/neutron-httpd/0.log" Nov 25 20:11:42 crc kubenswrapper[4926]: I1125 20:11:42.717954 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-87c6cbb57-5vxgs_6e439ef2-db64-41e2-853a-a16e48f1607d/neutron-api/0.log" Nov 25 20:11:42 crc kubenswrapper[4926]: I1125 20:11:42.769729 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-bzx2h_8e6b918a-9ab8-44c6-9b56-48189c2cbaf6/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:43 crc kubenswrapper[4926]: I1125 20:11:43.649689 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_3c274cca-be43-418d-9c50-adf2a19334e7/nova-cell0-conductor-conductor/0.log" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.192425 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_da257cb5-92cf-405d-b671-ed2123802153/nova-api-log/0.log" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.208218 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_5574e7bb-0691-4368-acd1-8e50441657e2/nova-cell1-conductor-conductor/0.log" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.328877 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:11:44 crc kubenswrapper[4926]: E1125 20:11:44.329327 4926 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.582609 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ea5ab9dd-7cab-491d-820f-fb35dbe3e2ae/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.625098 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_da257cb5-92cf-405d-b671-ed2123802153/nova-api-api/0.log" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.914610 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-w6frg_983d045a-121a-40e1-9948-ecd9a1569e26/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:44 crc kubenswrapper[4926]: I1125 20:11:44.929353 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9b3135e8-5795-4aaf-9c02-10dc929bb4a3/nova-metadata-log/0.log" Nov 25 20:11:45 crc kubenswrapper[4926]: I1125 20:11:45.413945 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_33cc50f8-8d20-4fa6-a697-2e508b70f929/nova-scheduler-scheduler/0.log" Nov 25 20:11:45 crc kubenswrapper[4926]: I1125 20:11:45.709990 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ac5169d3-6efd-4929-8f0a-b8cfae948182/mysql-bootstrap/0.log" Nov 25 20:11:45 crc kubenswrapper[4926]: I1125 20:11:45.924873 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ac5169d3-6efd-4929-8f0a-b8cfae948182/galera/0.log" Nov 25 20:11:45 crc kubenswrapper[4926]: I1125 20:11:45.928879 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ac5169d3-6efd-4929-8f0a-b8cfae948182/mysql-bootstrap/0.log" Nov 25 20:11:46 crc kubenswrapper[4926]: I1125 20:11:46.184604 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b04996f9-1035-4982-bd9b-f96ee30cd663/mysql-bootstrap/0.log" Nov 25 20:11:46 crc kubenswrapper[4926]: I1125 20:11:46.341184 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b04996f9-1035-4982-bd9b-f96ee30cd663/mysql-bootstrap/0.log" Nov 25 20:11:46 crc kubenswrapper[4926]: I1125 20:11:46.541850 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b04996f9-1035-4982-bd9b-f96ee30cd663/galera/1.log" Nov 25 20:11:46 crc kubenswrapper[4926]: I1125 20:11:46.570952 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b04996f9-1035-4982-bd9b-f96ee30cd663/galera/0.log" Nov 25 20:11:46 crc kubenswrapper[4926]: I1125 20:11:46.764281 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_436e3ae7-0a2a-4cba-9416-804f6cba9b26/openstackclient/0.log" Nov 25 20:11:47 crc kubenswrapper[4926]: I1125 20:11:47.108769 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-6mv24_e0494ec7-8e4a-4a0a-b67c-cd8c55f6a230/openstack-network-exporter/0.log" Nov 25 20:11:47 crc kubenswrapper[4926]: I1125 20:11:47.384834 4926 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qd88n_cc5c3159-fdbc-49d0-82e6-10ff2e855e5b/ovsdb-server-init/0.log" Nov 25 20:11:47 crc kubenswrapper[4926]: I1125 20:11:47.717423 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qd88n_cc5c3159-fdbc-49d0-82e6-10ff2e855e5b/ovsdb-server-init/0.log" Nov 25 20:11:47 crc kubenswrapper[4926]: I1125 20:11:47.936445 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qd88n_cc5c3159-fdbc-49d0-82e6-10ff2e855e5b/ovsdb-server/0.log" Nov 25 20:11:48 crc kubenswrapper[4926]: I1125 20:11:48.006944 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-qd88n_cc5c3159-fdbc-49d0-82e6-10ff2e855e5b/ovs-vswitchd/0.log" Nov 25 20:11:48 crc kubenswrapper[4926]: I1125 20:11:48.289619 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-wwqdd_b840840b-ff6b-439b-b043-7afd451ca6e7/ovn-controller/0.log" Nov 25 20:11:48 crc kubenswrapper[4926]: I1125 20:11:48.532062 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-mn7s6_c91c3c0b-35d3-402e-a758-672676d30d1d/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:48 crc kubenswrapper[4926]: I1125 20:11:48.717814 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1544f9d3-aef4-4a8c-af9f-af4bea56f954/openstack-network-exporter/0.log" Nov 25 20:11:48 crc kubenswrapper[4926]: I1125 20:11:48.845487 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1544f9d3-aef4-4a8c-af9f-af4bea56f954/ovn-northd/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.042798 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_077c6b4b-7e98-469d-8ab4-48833073ec4c/openstack-network-exporter/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.180066 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_9b3135e8-5795-4aaf-9c02-10dc929bb4a3/nova-metadata-metadata/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.200218 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_077c6b4b-7e98-469d-8ab4-48833073ec4c/ovsdbserver-nb/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.393963 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_42262798-4ac0-4099-b0b9-1820f77802cc/openstack-network-exporter/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.399754 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_42262798-4ac0-4099-b0b9-1820f77802cc/ovsdbserver-sb/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.884410 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-547c7d8d4d-wq9d8_89428ba2-a2c6-40eb-9e3f-878ebf7193c1/placement-api/0.log" Nov 25 20:11:49 crc kubenswrapper[4926]: I1125 20:11:49.972702 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-547c7d8d4d-wq9d8_89428ba2-a2c6-40eb-9e3f-878ebf7193c1/placement-log/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.043687 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_301464c9-c4d4-4b22-8d83-1df733e32f25/init-config-reloader/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 
20:11:50.184563 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_301464c9-c4d4-4b22-8d83-1df733e32f25/init-config-reloader/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.201218 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_301464c9-c4d4-4b22-8d83-1df733e32f25/config-reloader/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.281421 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_301464c9-c4d4-4b22-8d83-1df733e32f25/prometheus/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.376601 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_301464c9-c4d4-4b22-8d83-1df733e32f25/thanos-sidecar/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.423392 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7dc866d94f-flgn2_33f80090-20c8-407b-86ae-7ba88229140d/keystone-api/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.492547 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_249e603a-e4df-4b46-941d-ab40c5374c95/setup-container/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.671642 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_249e603a-e4df-4b46-941d-ab40c5374c95/setup-container/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.746929 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_249e603a-e4df-4b46-941d-ab40c5374c95/rabbitmq/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.768661 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_e53ddff3-6cac-43f4-98c6-f909431098f1/setup-container/0.log" Nov 25 20:11:50 crc kubenswrapper[4926]: I1125 20:11:50.984164 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c9470d78-c381-4be0-b06e-13e3f97422ac/setup-container/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.043864 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_e53ddff3-6cac-43f4-98c6-f909431098f1/setup-container/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.052311 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_e53ddff3-6cac-43f4-98c6-f909431098f1/rabbitmq/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.265676 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c9470d78-c381-4be0-b06e-13e3f97422ac/setup-container/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.296077 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c9470d78-c381-4be0-b06e-13e3f97422ac/rabbitmq/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.315932 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-z9mnp_316453c4-7c58-4d26-aaa1-9da97a22bcb6/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.515045 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-fh5fm_a2a96977-a5be-4222-86bf-7caf90e17f8d/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.634469 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-t9x2f_06191797-281e-401d-ab9e-c394f6e5f19d/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.755859 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-v2dkf_7e80b012-93ee-4623-b5fd-7d65987728a9/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:51 crc kubenswrapper[4926]: I1125 20:11:51.875524 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-757rs_d4b6aaa5-99ab-4ecf-9b49-f16fa3b1ecee/ssh-known-hosts-edpm-deployment/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.169990 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5db9fd79b9-99khc_3842d4ae-3cdd-48ad-a374-dbf807481f6f/proxy-server/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.224762 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-4nt6x_c1e783fb-e6a7-4df9-9459-58dbf2d9f4b1/swift-ring-rebalance/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.317662 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5db9fd79b9-99khc_3842d4ae-3cdd-48ad-a374-dbf807481f6f/proxy-httpd/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.367015 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/account-auditor/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.425545 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/account-reaper/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.566856 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/account-replicator/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.627980 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/account-server/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.638331 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/container-auditor/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.712918 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/container-replicator/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.754932 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/container-server/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.835204 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/container-updater/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.918587 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/object-expirer/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.951994 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/object-auditor/0.log" Nov 25 20:11:52 crc kubenswrapper[4926]: I1125 20:11:52.995487 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/object-replicator/0.log" Nov 25 20:11:53 crc kubenswrapper[4926]: I1125 20:11:53.063619 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/object-server/0.log" Nov 25 20:11:53 crc kubenswrapper[4926]: I1125 20:11:53.122125 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/object-updater/0.log" Nov 25 20:11:53 crc kubenswrapper[4926]: I1125 20:11:53.225508 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/rsync/0.log" Nov 25 20:11:53 crc kubenswrapper[4926]: I1125 20:11:53.231442 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_e1d8eee0-eb0b-41ad-b486-e7b20ffee29a/swift-recon-cron/0.log" Nov 25 20:11:53 crc kubenswrapper[4926]: I1125 20:11:53.462670 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-5ljfs_d752f8f9-0324-4383-9157-f1e23a46572b/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:53 crc kubenswrapper[4926]: I1125 20:11:53.676858 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-6rt7k_9a577401-9bfd-42a3-82f9-185a545b6d3b/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 20:11:54 crc kubenswrapper[4926]: I1125 20:11:54.158660 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_701f3ca1-77fd-4044-bdb3-e926855d035e/tempest-tests-tempest-tests-runner/0.log" Nov 25 20:11:54 crc kubenswrapper[4926]: I1125 20:11:54.861692 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_2f870c30-7159-4613-a2a3-bee7bf700ac8/watcher-applier/0.log" Nov 25 20:11:56 crc kubenswrapper[4926]: I1125 20:11:56.192637 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_e85535cf-74ad-4608-9752-f44beb920a02/watcher-api-log/0.log" Nov 25 20:11:56 crc kubenswrapper[4926]: I1125 20:11:56.330452 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:11:56 crc kubenswrapper[4926]: E1125 20:11:56.330685 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:11:57 crc kubenswrapper[4926]: I1125 20:11:57.376811 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_29d2e355-bed6-4dab-98c3-e2dc1134d327/watcher-decision-engine/0.log" Nov 25 20:12:00 crc kubenswrapper[4926]: 
I1125 20:12:00.800196 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_e85535cf-74ad-4608-9752-f44beb920a02/watcher-api/0.log" Nov 25 20:12:07 crc kubenswrapper[4926]: I1125 20:12:07.094027 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_b7b8348c-1f7d-41f0-9f36-29f6d84ef2d1/memcached/0.log" Nov 25 20:12:11 crc kubenswrapper[4926]: I1125 20:12:11.328938 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:12:11 crc kubenswrapper[4926]: E1125 20:12:11.329815 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:12:25 crc kubenswrapper[4926]: I1125 20:12:25.329770 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:12:25 crc kubenswrapper[4926]: E1125 20:12:25.330507 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:12:25 crc kubenswrapper[4926]: I1125 20:12:25.579636 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/util/0.log" Nov 25 20:12:25 crc kubenswrapper[4926]: I1125 20:12:25.751719 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/util/0.log" Nov 25 20:12:25 crc kubenswrapper[4926]: I1125 20:12:25.785102 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/pull/0.log" Nov 25 20:12:25 crc kubenswrapper[4926]: I1125 20:12:25.796897 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/pull/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.014041 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/util/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.025664 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/pull/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.083326 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_72ad6b623d345e9bd5376d963e8d452b314017cb000224676522ab8301gxpsw_7a85c324-96e2-42e0-9c20-8dfcd6d203d4/extract/0.log" Nov 25 20:12:26 
crc kubenswrapper[4926]: I1125 20:12:26.197830 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hhl9b_c8395389-762a-497d-972e-0987350a9a00/kube-rbac-proxy/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.204556 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hhl9b_c8395389-762a-497d-972e-0987350a9a00/manager/3.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.270934 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hhl9b_c8395389-762a-497d-972e-0987350a9a00/manager/2.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.400439 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-dxhsp_421c1930-795c-4e93-9865-bff40d49ddf5/manager/3.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.415646 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-dxhsp_421c1930-795c-4e93-9865-bff40d49ddf5/kube-rbac-proxy/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.464354 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-dxhsp_421c1930-795c-4e93-9865-bff40d49ddf5/manager/2.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.618753 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-bpsp8_e949ca02-dbd2-4361-8b44-a498d1ec4c13/kube-rbac-proxy/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.636921 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-bpsp8_e949ca02-dbd2-4361-8b44-a498d1ec4c13/manager/3.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.676646 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-bpsp8_e949ca02-dbd2-4361-8b44-a498d1ec4c13/manager/2.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.801855 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-8w6rx_c8322c05-5b96-4489-87a7-1677f90df80c/kube-rbac-proxy/0.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.842628 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-8w6rx_c8322c05-5b96-4489-87a7-1677f90df80c/manager/3.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.861165 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-8w6rx_c8322c05-5b96-4489-87a7-1677f90df80c/manager/2.log" Nov 25 20:12:26 crc kubenswrapper[4926]: I1125 20:12:26.988642 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5q59t_3b14286a-e339-4bd3-835c-67287c341869/kube-rbac-proxy/0.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.055784 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5q59t_3b14286a-e339-4bd3-835c-67287c341869/manager/1.log" Nov 25 
20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.068277 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5q59t_3b14286a-e339-4bd3-835c-67287c341869/manager/2.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.209807 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-h55p4_1df300a3-1d64-4e46-a0b5-9fe0bf029321/kube-rbac-proxy/0.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.234546 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-h55p4_1df300a3-1d64-4e46-a0b5-9fe0bf029321/manager/3.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.238481 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-h55p4_1df300a3-1d64-4e46-a0b5-9fe0bf029321/manager/2.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.378982 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-l4vqr_c67a3051-deee-4c35-b2fd-73f0f96ccbac/kube-rbac-proxy/0.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.395310 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-l4vqr_c67a3051-deee-4c35-b2fd-73f0f96ccbac/manager/3.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.421015 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-l4vqr_c67a3051-deee-4c35-b2fd-73f0f96ccbac/manager/2.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.600515 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-89dkl_b70bd0b1-5555-49f4-ae5f-dfeebd005029/kube-rbac-proxy/0.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.609224 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-89dkl_b70bd0b1-5555-49f4-ae5f-dfeebd005029/manager/4.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.773522 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-89dkl_b70bd0b1-5555-49f4-ae5f-dfeebd005029/manager/3.log" Nov 25 20:12:27 crc kubenswrapper[4926]: I1125 20:12:27.878805 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-gr8fl_9dfe0bac-7a60-47c9-bef9-e34a75d23521/kube-rbac-proxy/0.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.002419 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-gr8fl_9dfe0bac-7a60-47c9-bef9-e34a75d23521/manager/1.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.024310 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-gr8fl_9dfe0bac-7a60-47c9-bef9-e34a75d23521/manager/2.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.091948 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-44shk_5859a238-ed77-4ef7-ac69-295bd1c875c3/kube-rbac-proxy/0.log" Nov 
25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.164758 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-44shk_5859a238-ed77-4ef7-ac69-295bd1c875c3/manager/3.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.209007 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-44shk_5859a238-ed77-4ef7-ac69-295bd1c875c3/manager/2.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.287100 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_c613eed5-f72e-4b4d-8283-5aa4e6241157/kube-rbac-proxy/0.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.362886 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_c613eed5-f72e-4b4d-8283-5aa4e6241157/manager/3.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.398630 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-5nvnv_c613eed5-f72e-4b4d-8283-5aa4e6241157/manager/2.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.468383 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-rslqc_b4c6b194-9a8e-4cdb-a0e0-e67dce03328f/kube-rbac-proxy/0.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.580354 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-rslqc_b4c6b194-9a8e-4cdb-a0e0-e67dce03328f/manager/3.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.584852 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-rslqc_b4c6b194-9a8e-4cdb-a0e0-e67dce03328f/manager/2.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.695923 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rl7hc_306a2bb2-20b9-436d-809a-55499e85e4d6/kube-rbac-proxy/0.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.818892 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rl7hc_306a2bb2-20b9-436d-809a-55499e85e4d6/manager/3.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.832899 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rl7hc_306a2bb2-20b9-436d-809a-55499e85e4d6/manager/2.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.903134 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-s4wxr_f9d1a5dc-de6e-45fa-ab5d-1de529f40894/kube-rbac-proxy/0.log" Nov 25 20:12:28 crc kubenswrapper[4926]: I1125 20:12:28.984960 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-s4wxr_f9d1a5dc-de6e-45fa-ab5d-1de529f40894/manager/4.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.061926 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-s4wxr_f9d1a5dc-de6e-45fa-ab5d-1de529f40894/manager/3.log" Nov 25 
20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.104473 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74_4e869634-c2f9-4248-8ad7-dd9af0315f2b/kube-rbac-proxy/0.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.232738 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74_4e869634-c2f9-4248-8ad7-dd9af0315f2b/manager/2.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.310226 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bp7s74_4e869634-c2f9-4248-8ad7-dd9af0315f2b/manager/1.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.447530 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-79c67b7c89-tcqww_e15f3b97-0859-4f12-87cd-514fab3d75aa/manager/2.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.450113 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-79c67b7c89-tcqww_e15f3b97-0859-4f12-87cd-514fab3d75aa/manager/1.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.635367 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7d958449d8-hxqgw_50b0b29f-bc51-4109-88ce-84d3223fc78e/operator/1.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.670236 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6xpmt_751df222-409a-4aaf-9558-2c777866237c/registry-server/0.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.726346 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7d958449d8-hxqgw_50b0b29f-bc51-4109-88ce-84d3223fc78e/operator/0.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.889841 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-9f2dg_d992fc2a-a506-4c10-a8fa-1e3416074e73/manager/2.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.902934 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-9f2dg_d992fc2a-a506-4c10-a8fa-1e3416074e73/manager/3.log" Nov 25 20:12:29 crc kubenswrapper[4926]: I1125 20:12:29.910279 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-9f2dg_d992fc2a-a506-4c10-a8fa-1e3416074e73/kube-rbac-proxy/0.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.033762 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-k8j22_74627669-e952-4db6-b082-5e7bd38b03b3/kube-rbac-proxy/0.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.083029 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-k8j22_74627669-e952-4db6-b082-5e7bd38b03b3/manager/3.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.114077 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-k8j22_74627669-e952-4db6-b082-5e7bd38b03b3/manager/2.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.165890 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-v9gvn_abc17280-a647-4d60-8a1a-d01505970238/operator/2.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.232812 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-v9gvn_abc17280-a647-4d60-8a1a-d01505970238/operator/1.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.299583 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-gcvkp_230b098e-8a89-417e-b5aa-994695273779/manager/3.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.305146 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-gcvkp_230b098e-8a89-417e-b5aa-994695273779/kube-rbac-proxy/0.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.387418 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-gcvkp_230b098e-8a89-417e-b5aa-994695273779/manager/2.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.468575 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-mc5kd_5885db97-a86c-482e-9851-2d8351dc0c3a/kube-rbac-proxy/0.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.542028 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-mc5kd_5885db97-a86c-482e-9851-2d8351dc0c3a/manager/3.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.563389 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-mc5kd_5885db97-a86c-482e-9851-2d8351dc0c3a/manager/2.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.661813 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-5nnqb_7ac21b6b-e21a-43db-acf1-cce61bf188ef/kube-rbac-proxy/0.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.710142 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-5nnqb_7ac21b6b-e21a-43db-acf1-cce61bf188ef/manager/2.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.733858 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-5nnqb_7ac21b6b-e21a-43db-acf1-cce61bf188ef/manager/1.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.834029 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-647d45fc97-x65c4_596d3616-ddec-489c-be4d-7e340f9e2acb/kube-rbac-proxy/0.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.873591 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-647d45fc97-x65c4_596d3616-ddec-489c-be4d-7e340f9e2acb/manager/3.log" Nov 25 20:12:30 crc kubenswrapper[4926]: I1125 20:12:30.919030 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-647d45fc97-x65c4_596d3616-ddec-489c-be4d-7e340f9e2acb/manager/2.log" Nov 25 20:12:36 crc kubenswrapper[4926]: I1125 20:12:36.329773 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:12:36 crc kubenswrapper[4926]: E1125 20:12:36.330548 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:12:48 crc kubenswrapper[4926]: I1125 20:12:48.329339 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:12:48 crc kubenswrapper[4926]: E1125 20:12:48.330399 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:12:51 crc kubenswrapper[4926]: I1125 20:12:51.063990 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-x5l94_455c82d6-6c13-4315-9610-a50e40fb528f/control-plane-machine-set-operator/0.log" Nov 25 20:12:51 crc kubenswrapper[4926]: I1125 20:12:51.211506 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tfmxn_18084ba7-0cc7-4aff-b740-277d5dfbd2c3/kube-rbac-proxy/0.log" Nov 25 20:12:51 crc kubenswrapper[4926]: I1125 20:12:51.266860 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tfmxn_18084ba7-0cc7-4aff-b740-277d5dfbd2c3/machine-api-operator/0.log" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.279681 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6nrtv"] Nov 25 20:12:54 crc kubenswrapper[4926]: E1125 20:12:54.280762 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78e25de7-1efb-4c83-be89-c9fe6cb7727b" containerName="container-00" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.280779 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="78e25de7-1efb-4c83-be89-c9fe6cb7727b" containerName="container-00" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.281061 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="78e25de7-1efb-4c83-be89-c9fe6cb7727b" containerName="container-00" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.283105 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.292388 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6nrtv"] Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.336706 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4sq4\" (UniqueName: \"kubernetes.io/projected/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-kube-api-access-s4sq4\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.336786 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-utilities\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.336817 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-catalog-content\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.438640 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4sq4\" (UniqueName: \"kubernetes.io/projected/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-kube-api-access-s4sq4\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.438720 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-utilities\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.438767 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-catalog-content\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.439253 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-utilities\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.439306 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-catalog-content\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.468323 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-s4sq4\" (UniqueName: \"kubernetes.io/projected/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-kube-api-access-s4sq4\") pod \"redhat-operators-6nrtv\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:54 crc kubenswrapper[4926]: I1125 20:12:54.616235 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:12:55 crc kubenswrapper[4926]: I1125 20:12:55.128265 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6nrtv"] Nov 25 20:12:56 crc kubenswrapper[4926]: I1125 20:12:56.009692 4926 generic.go:334] "Generic (PLEG): container finished" podID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerID="d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f" exitCode=0 Nov 25 20:12:56 crc kubenswrapper[4926]: I1125 20:12:56.010086 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerDied","Data":"d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f"} Nov 25 20:12:56 crc kubenswrapper[4926]: I1125 20:12:56.010610 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerStarted","Data":"53b080bd818be1941aa6bc367291dda09631e91da589d3d613c150d57ad2953d"} Nov 25 20:12:57 crc kubenswrapper[4926]: I1125 20:12:57.025653 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerStarted","Data":"2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d"} Nov 25 20:13:01 crc kubenswrapper[4926]: I1125 20:13:01.067891 4926 generic.go:334] "Generic (PLEG): container finished" podID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerID="2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d" exitCode=0 Nov 25 20:13:01 crc kubenswrapper[4926]: I1125 20:13:01.067987 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerDied","Data":"2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d"} Nov 25 20:13:02 crc kubenswrapper[4926]: I1125 20:13:02.080218 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerStarted","Data":"6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b"} Nov 25 20:13:02 crc kubenswrapper[4926]: I1125 20:13:02.101889 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6nrtv" podStartSLOduration=2.6203155689999997 podStartE2EDuration="8.101870084s" podCreationTimestamp="2025-11-25 20:12:54 +0000 UTC" firstStartedPulling="2025-11-25 20:12:56.011797526 +0000 UTC m=+7206.397311131" lastFinishedPulling="2025-11-25 20:13:01.493352041 +0000 UTC m=+7211.878865646" observedRunningTime="2025-11-25 20:13:02.098794941 +0000 UTC m=+7212.484308556" watchObservedRunningTime="2025-11-25 20:13:02.101870084 +0000 UTC m=+7212.487383699" Nov 25 20:13:03 crc kubenswrapper[4926]: I1125 20:13:03.330084 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 
20:13:03 crc kubenswrapper[4926]: E1125 20:13:03.331269 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:13:04 crc kubenswrapper[4926]: I1125 20:13:04.616896 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:13:04 crc kubenswrapper[4926]: I1125 20:13:04.617285 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:13:05 crc kubenswrapper[4926]: I1125 20:13:05.666632 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6nrtv" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="registry-server" probeResult="failure" output=< Nov 25 20:13:05 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 20:13:05 crc kubenswrapper[4926]: > Nov 25 20:13:05 crc kubenswrapper[4926]: I1125 20:13:05.668006 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-x8r5t_5fd3c793-dd9b-475c-b527-51c52d21e018/cert-manager-controller/1.log" Nov 25 20:13:05 crc kubenswrapper[4926]: I1125 20:13:05.738169 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-x8r5t_5fd3c793-dd9b-475c-b527-51c52d21e018/cert-manager-controller/0.log" Nov 25 20:13:05 crc kubenswrapper[4926]: I1125 20:13:05.923071 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fzlrf_e5a12f1c-5ece-4ecc-a24b-6570902b3f18/cert-manager-cainjector/0.log" Nov 25 20:13:05 crc kubenswrapper[4926]: I1125 20:13:05.967212 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-qhdk2_fc0b92d7-9c5e-4cf4-b31e-19bb7c0e38ce/cert-manager-webhook/0.log" Nov 25 20:13:15 crc kubenswrapper[4926]: I1125 20:13:15.328822 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:13:15 crc kubenswrapper[4926]: E1125 20:13:15.329572 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:13:15 crc kubenswrapper[4926]: I1125 20:13:15.687933 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6nrtv" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="registry-server" probeResult="failure" output=< Nov 25 20:13:15 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 20:13:15 crc kubenswrapper[4926]: > Nov 25 20:13:19 crc kubenswrapper[4926]: I1125 20:13:19.558146 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-ftcsp_45e30b58-3691-41a3-afa2-ba29332f53aa/nmstate-console-plugin/0.log" Nov 25 20:13:19 crc kubenswrapper[4926]: I1125 20:13:19.766155 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-pjwht_d4ef439a-e50d-43bd-87d1-86fea196f862/nmstate-handler/0.log" Nov 25 20:13:19 crc kubenswrapper[4926]: I1125 20:13:19.804096 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-rmxx7_6a9e2b29-259a-4f14-8308-975a8d167ce3/kube-rbac-proxy/0.log" Nov 25 20:13:19 crc kubenswrapper[4926]: I1125 20:13:19.817550 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-rmxx7_6a9e2b29-259a-4f14-8308-975a8d167ce3/nmstate-metrics/0.log" Nov 25 20:13:20 crc kubenswrapper[4926]: I1125 20:13:20.036884 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-588xx_57273d48-1562-49dc-824b-1dc71dd89583/nmstate-operator/0.log" Nov 25 20:13:20 crc kubenswrapper[4926]: I1125 20:13:20.096866 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-gblp6_b84b6271-61b8-4bc0-8a1b-e09991c9e6af/nmstate-webhook/0.log" Nov 25 20:13:25 crc kubenswrapper[4926]: I1125 20:13:25.668270 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6nrtv" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="registry-server" probeResult="failure" output=< Nov 25 20:13:25 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 25 20:13:25 crc kubenswrapper[4926]: > Nov 25 20:13:30 crc kubenswrapper[4926]: I1125 20:13:30.337758 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:13:30 crc kubenswrapper[4926]: E1125 20:13:30.339325 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:13:34 crc kubenswrapper[4926]: I1125 20:13:34.698342 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:13:34 crc kubenswrapper[4926]: I1125 20:13:34.813164 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:13:34 crc kubenswrapper[4926]: I1125 20:13:34.966801 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6nrtv"] Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.398957 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6nrtv" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="registry-server" containerID="cri-o://6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b" gracePeriod=2 Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.465865 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_controller-6c7b4b5f48-qccd5_1d98e353-4142-4bcb-b9fb-489ebe6313be/kube-rbac-proxy/0.log" Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.499506 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-qccd5_1d98e353-4142-4bcb-b9fb-489ebe6313be/controller/0.log" Nov 25 20:13:36 crc kubenswrapper[4926]: E1125 20:13:36.531957 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb40a6ca8_26b8_46d0_b00a_1e8b86fca553.slice/crio-6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb40a6ca8_26b8_46d0_b00a_1e8b86fca553.slice/crio-conmon-6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b.scope\": RecentStats: unable to find data in memory cache]" Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.610894 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-frr-files/0.log" Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.904484 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.910573 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-metrics/0.log" Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.925573 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-reloader/0.log" Nov 25 20:13:36 crc kubenswrapper[4926]: I1125 20:13:36.930667 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-reloader/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.008977 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-frr-files/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.041664 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4sq4\" (UniqueName: \"kubernetes.io/projected/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-kube-api-access-s4sq4\") pod \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.041869 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-catalog-content\") pod \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.041989 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-utilities\") pod \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\" (UID: \"b40a6ca8-26b8-46d0-b00a-1e8b86fca553\") " Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.042412 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-utilities" 
(OuterVolumeSpecName: "utilities") pod "b40a6ca8-26b8-46d0-b00a-1e8b86fca553" (UID: "b40a6ca8-26b8-46d0-b00a-1e8b86fca553"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.056612 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-kube-api-access-s4sq4" (OuterVolumeSpecName: "kube-api-access-s4sq4") pod "b40a6ca8-26b8-46d0-b00a-1e8b86fca553" (UID: "b40a6ca8-26b8-46d0-b00a-1e8b86fca553"). InnerVolumeSpecName "kube-api-access-s4sq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.144485 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.144736 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4sq4\" (UniqueName: \"kubernetes.io/projected/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-kube-api-access-s4sq4\") on node \"crc\" DevicePath \"\"" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.149916 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b40a6ca8-26b8-46d0-b00a-1e8b86fca553" (UID: "b40a6ca8-26b8-46d0-b00a-1e8b86fca553"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.168078 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-metrics/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.173524 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-reloader/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.190720 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-frr-files/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.231989 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-metrics/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.246546 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b40a6ca8-26b8-46d0-b00a-1e8b86fca553-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.409673 4926 generic.go:334] "Generic (PLEG): container finished" podID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerID="6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b" exitCode=0 Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.409710 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerDied","Data":"6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b"} Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.409735 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nrtv" 
event={"ID":"b40a6ca8-26b8-46d0-b00a-1e8b86fca553","Type":"ContainerDied","Data":"53b080bd818be1941aa6bc367291dda09631e91da589d3d613c150d57ad2953d"} Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.409738 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6nrtv" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.409753 4926 scope.go:117] "RemoveContainer" containerID="6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.428655 4926 scope.go:117] "RemoveContainer" containerID="2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.447424 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6nrtv"] Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.453967 4926 scope.go:117] "RemoveContainer" containerID="d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.457395 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6nrtv"] Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.484507 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-reloader/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.498247 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-metrics/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.502674 4926 scope.go:117] "RemoveContainer" containerID="6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b" Nov 25 20:13:37 crc kubenswrapper[4926]: E1125 20:13:37.503145 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b\": container with ID starting with 6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b not found: ID does not exist" containerID="6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.503186 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b"} err="failed to get container status \"6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b\": rpc error: code = NotFound desc = could not find container \"6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b\": container with ID starting with 6bc23847c8806f09ccd26d116e12eb2bc90989b29cb3daa43d84b17bcd3bed6b not found: ID does not exist" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.503211 4926 scope.go:117] "RemoveContainer" containerID="2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.503152 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/cp-frr-files/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: E1125 20:13:37.503538 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d\": container with ID 
starting with 2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d not found: ID does not exist" containerID="2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.503573 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d"} err="failed to get container status \"2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d\": rpc error: code = NotFound desc = could not find container \"2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d\": container with ID starting with 2eb97b111168c13a69ed3d2a489481193e156a6cd396f33120598b9b5ee85d5d not found: ID does not exist" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.503598 4926 scope.go:117] "RemoveContainer" containerID="d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f" Nov 25 20:13:37 crc kubenswrapper[4926]: E1125 20:13:37.503857 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f\": container with ID starting with d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f not found: ID does not exist" containerID="d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.503880 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f"} err="failed to get container status \"d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f\": rpc error: code = NotFound desc = could not find container \"d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f\": container with ID starting with d4da911bb8d3709ce5602a157929cd185663f78875fb6c81b3fdafa760090a8f not found: ID does not exist" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.504715 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/controller/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.720186 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/kube-rbac-proxy/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.722322 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/frr-metrics/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.737055 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/kube-rbac-proxy-frr/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.956397 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/reloader/0.log" Nov 25 20:13:37 crc kubenswrapper[4926]: I1125 20:13:37.987776 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-5xq6x_4611d03c-74a2-41ac-b29d-5629fce0b40c/frr-k8s-webhook-server/0.log" Nov 25 20:13:38 crc kubenswrapper[4926]: I1125 20:13:38.211360 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5c55bddd9c-5nmb8_2ac11a24-0681-41d4-b943-8bf5b5396a40/manager/2.log" Nov 25 20:13:38 crc kubenswrapper[4926]: I1125 20:13:38.232555 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5c55bddd9c-5nmb8_2ac11a24-0681-41d4-b943-8bf5b5396a40/manager/1.log" Nov 25 20:13:38 crc kubenswrapper[4926]: I1125 20:13:38.339968 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" path="/var/lib/kubelet/pods/b40a6ca8-26b8-46d0-b00a-1e8b86fca553/volumes" Nov 25 20:13:38 crc kubenswrapper[4926]: I1125 20:13:38.395647 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-69bf9645b5-fww29_ce5ab3f1-def6-4a51-8a7a-4511cfe62bf0/webhook-server/0.log" Nov 25 20:13:38 crc kubenswrapper[4926]: I1125 20:13:38.765469 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zbmsj_14e6944d-e69a-42b5-8645-e272346dd12d/kube-rbac-proxy/0.log" Nov 25 20:13:39 crc kubenswrapper[4926]: I1125 20:13:39.394953 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-b9mg6_8b57b9fd-a2d0-46fe-bfc1-250c356b162b/frr/0.log" Nov 25 20:13:39 crc kubenswrapper[4926]: I1125 20:13:39.410889 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zbmsj_14e6944d-e69a-42b5-8645-e272346dd12d/speaker/0.log" Nov 25 20:13:43 crc kubenswrapper[4926]: I1125 20:13:43.328685 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:13:43 crc kubenswrapper[4926]: E1125 20:13:43.329641 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.152167 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/util/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.353586 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/pull/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.375502 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/util/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.438330 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/pull/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.567009 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/util/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 
20:13:53.590616 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/pull/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.616333 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprwmd_6e3cd12d-0c9e-4cc8-8a2e-c3f9788ef131/extract/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.755311 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/util/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.945227 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/util/0.log" Nov 25 20:13:53 crc kubenswrapper[4926]: I1125 20:13:53.975411 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/pull/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.126108 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/pull/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.307363 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/pull/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.328033 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/util/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.356442 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92107pj4d_c148da96-30bb-41ad-8d86-1a3c60450fd7/extract/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.479663 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/extract-utilities/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.691934 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/extract-content/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.713958 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/extract-utilities/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.720726 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/extract-content/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 20:13:54.869208 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/extract-utilities/0.log" Nov 25 20:13:54 crc kubenswrapper[4926]: I1125 
20:13:54.938094 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/extract-content/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.157490 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/extract-utilities/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.329091 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:13:55 crc kubenswrapper[4926]: E1125 20:13:55.329354 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.359905 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/extract-utilities/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.410986 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/extract-content/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.434755 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/extract-content/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.599108 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/extract-utilities/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.767502 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/extract-content/0.log" Nov 25 20:13:55 crc kubenswrapper[4926]: I1125 20:13:55.991190 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5xqw8_bc09add5-d326-40d3-9dce-cf8d6ca18360/registry-server/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.003484 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/util/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.009843 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qlf69_5b2ed5fc-51b6-4b2a-abdc-312ac928d471/registry-server/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.188219 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/util/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.202963 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/pull/0.log" Nov 25 20:13:56 crc 
kubenswrapper[4926]: I1125 20:13:56.213174 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/pull/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.400568 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/pull/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.457597 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/extract/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.483073 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6sj76r_8b777648-39b0-4f4a-a63e-b4ee135ee3cd/util/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.663951 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-5xksb_679c6a97-f755-4e07-8d02-13b4ab9616d1/marketplace-operator/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.768935 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/extract-utilities/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.951000 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/extract-content/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.951997 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/extract-content/0.log" Nov 25 20:13:56 crc kubenswrapper[4926]: I1125 20:13:56.997270 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/extract-utilities/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.314559 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/extract-utilities/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.315545 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/extract-utilities/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.348934 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/extract-content/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.554343 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/extract-utilities/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.631159 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5j4h6_757453db-5fee-408c-a5a1-2214d225129d/registry-server/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.671135 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/extract-content/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.715233 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/extract-content/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.862182 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/extract-utilities/0.log" Nov 25 20:13:57 crc kubenswrapper[4926]: I1125 20:13:57.875823 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/extract-content/0.log" Nov 25 20:13:58 crc kubenswrapper[4926]: I1125 20:13:58.135173 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-2xgvv_a165105a-b61e-4c9a-a0fd-b01562eea725/registry-server/0.log" Nov 25 20:14:08 crc kubenswrapper[4926]: I1125 20:14:08.328894 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:14:08 crc kubenswrapper[4926]: E1125 20:14:08.329698 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:14:12 crc kubenswrapper[4926]: I1125 20:14:12.136895 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-km4cj_5d4652b0-e12f-40d1-8370-e32b2aa51b96/prometheus-operator/0.log" Nov 25 20:14:12 crc kubenswrapper[4926]: I1125 20:14:12.467463 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7fbd78f954-829cx_4adc4849-d8a6-4eff-8fd9-bf801de1ab33/prometheus-operator-admission-webhook/0.log" Nov 25 20:14:12 crc kubenswrapper[4926]: I1125 20:14:12.517888 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7fbd78f954-cgpnh_f37f6813-7831-4557-91c3-499fa6f790a9/prometheus-operator-admission-webhook/0.log" Nov 25 20:14:12 crc kubenswrapper[4926]: I1125 20:14:12.664727 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-45q9w_48648219-c573-4084-a23b-17ef23df2666/operator/1.log" Nov 25 20:14:12 crc kubenswrapper[4926]: I1125 20:14:12.748319 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-45q9w_48648219-c573-4084-a23b-17ef23df2666/operator/0.log" Nov 25 20:14:12 crc kubenswrapper[4926]: I1125 20:14:12.784614 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-cq9q8_42a2f9e2-7492-45ef-9049-b617d5c1c36d/perses-operator/0.log" Nov 25 20:14:21 crc kubenswrapper[4926]: I1125 20:14:21.329286 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:14:21 crc kubenswrapper[4926]: E1125 20:14:21.330226 4926 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:14:29 crc kubenswrapper[4926]: E1125 20:14:29.034685 4926 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.212:38016->38.102.83.212:46611: read tcp 38.102.83.212:38016->38.102.83.212:46611: read: connection reset by peer Nov 25 20:14:35 crc kubenswrapper[4926]: I1125 20:14:35.329448 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:14:35 crc kubenswrapper[4926]: E1125 20:14:35.330309 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:14:49 crc kubenswrapper[4926]: I1125 20:14:49.330138 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:14:49 crc kubenswrapper[4926]: E1125 20:14:49.331126 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.465175 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g6jfx"] Nov 25 20:14:59 crc kubenswrapper[4926]: E1125 20:14:59.466291 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="extract-content" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.466334 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="extract-content" Nov 25 20:14:59 crc kubenswrapper[4926]: E1125 20:14:59.466367 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="extract-utilities" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.466394 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="extract-utilities" Nov 25 20:14:59 crc kubenswrapper[4926]: E1125 20:14:59.466454 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="registry-server" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.466464 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" containerName="registry-server" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.466694 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b40a6ca8-26b8-46d0-b00a-1e8b86fca553" 
containerName="registry-server" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.468873 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.483898 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g6jfx"] Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.617719 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-catalog-content\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.617757 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-969k4\" (UniqueName: \"kubernetes.io/projected/fc2aad25-650a-4363-8d84-bf03075d1d85-kube-api-access-969k4\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.618453 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-utilities\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.720683 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-utilities\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.720776 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-catalog-content\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.720820 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-969k4\" (UniqueName: \"kubernetes.io/projected/fc2aad25-650a-4363-8d84-bf03075d1d85-kube-api-access-969k4\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.721276 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-utilities\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.721501 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-catalog-content\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " 
pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.747751 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-969k4\" (UniqueName: \"kubernetes.io/projected/fc2aad25-650a-4363-8d84-bf03075d1d85-kube-api-access-969k4\") pod \"redhat-marketplace-g6jfx\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:14:59 crc kubenswrapper[4926]: I1125 20:14:59.803326 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.168244 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx"] Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.170575 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.172937 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.173518 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 20:15:00 crc kubenswrapper[4926]: W1125 20:15:00.179671 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc2aad25_650a_4363_8d84_bf03075d1d85.slice/crio-01c516f2505edb8fa5a3233c3dba495f753c36d65c7074ed8b9b84e0f6adcb7c WatchSource:0}: Error finding container 01c516f2505edb8fa5a3233c3dba495f753c36d65c7074ed8b9b84e0f6adcb7c: Status 404 returned error can't find the container with id 01c516f2505edb8fa5a3233c3dba495f753c36d65c7074ed8b9b84e0f6adcb7c Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.191616 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx"] Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.215078 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g6jfx"] Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.351250 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f080d2bf-e48b-43e6-9213-e12731ef519f-secret-volume\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.351515 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw2lf\" (UniqueName: \"kubernetes.io/projected/f080d2bf-e48b-43e6-9213-e12731ef519f-kube-api-access-nw2lf\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.351644 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f080d2bf-e48b-43e6-9213-e12731ef519f-config-volume\") pod 
\"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.456766 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw2lf\" (UniqueName: \"kubernetes.io/projected/f080d2bf-e48b-43e6-9213-e12731ef519f-kube-api-access-nw2lf\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.456861 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f080d2bf-e48b-43e6-9213-e12731ef519f-config-volume\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.456912 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f080d2bf-e48b-43e6-9213-e12731ef519f-secret-volume\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.458048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f080d2bf-e48b-43e6-9213-e12731ef519f-config-volume\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.464727 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f080d2bf-e48b-43e6-9213-e12731ef519f-secret-volume\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.472239 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw2lf\" (UniqueName: \"kubernetes.io/projected/f080d2bf-e48b-43e6-9213-e12731ef519f-kube-api-access-nw2lf\") pod \"collect-profiles-29401695-9ncdx\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.519015 4926 generic.go:334] "Generic (PLEG): container finished" podID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerID="3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627" exitCode=0 Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.519126 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerDied","Data":"3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627"} Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.519322 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" 
event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerStarted","Data":"01c516f2505edb8fa5a3233c3dba495f753c36d65c7074ed8b9b84e0f6adcb7c"} Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.521190 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 20:15:00 crc kubenswrapper[4926]: I1125 20:15:00.537778 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:01 crc kubenswrapper[4926]: W1125 20:15:01.040801 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf080d2bf_e48b_43e6_9213_e12731ef519f.slice/crio-90467b16db5b8d13fb522ea02c0f3d0e168f682b2fd66964129bf28b90d4114f WatchSource:0}: Error finding container 90467b16db5b8d13fb522ea02c0f3d0e168f682b2fd66964129bf28b90d4114f: Status 404 returned error can't find the container with id 90467b16db5b8d13fb522ea02c0f3d0e168f682b2fd66964129bf28b90d4114f Nov 25 20:15:01 crc kubenswrapper[4926]: I1125 20:15:01.051040 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx"] Nov 25 20:15:01 crc kubenswrapper[4926]: I1125 20:15:01.549818 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerStarted","Data":"c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0"} Nov 25 20:15:01 crc kubenswrapper[4926]: I1125 20:15:01.552766 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" event={"ID":"f080d2bf-e48b-43e6-9213-e12731ef519f","Type":"ContainerStarted","Data":"503b6e445401accb6acbb2acdba6c18c047bfbb9a4d7e4302ff1548eaeade41e"} Nov 25 20:15:01 crc kubenswrapper[4926]: I1125 20:15:01.552966 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" event={"ID":"f080d2bf-e48b-43e6-9213-e12731ef519f","Type":"ContainerStarted","Data":"90467b16db5b8d13fb522ea02c0f3d0e168f682b2fd66964129bf28b90d4114f"} Nov 25 20:15:01 crc kubenswrapper[4926]: I1125 20:15:01.612793 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" podStartSLOduration=1.612774629 podStartE2EDuration="1.612774629s" podCreationTimestamp="2025-11-25 20:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 20:15:01.594726304 +0000 UTC m=+7331.980239909" watchObservedRunningTime="2025-11-25 20:15:01.612774629 +0000 UTC m=+7331.998288234" Nov 25 20:15:02 crc kubenswrapper[4926]: I1125 20:15:02.330195 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:15:02 crc kubenswrapper[4926]: E1125 20:15:02.330548 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 
20:15:02 crc kubenswrapper[4926]: I1125 20:15:02.567209 4926 generic.go:334] "Generic (PLEG): container finished" podID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerID="c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0" exitCode=0 Nov 25 20:15:02 crc kubenswrapper[4926]: I1125 20:15:02.567318 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerDied","Data":"c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0"} Nov 25 20:15:02 crc kubenswrapper[4926]: I1125 20:15:02.571104 4926 generic.go:334] "Generic (PLEG): container finished" podID="f080d2bf-e48b-43e6-9213-e12731ef519f" containerID="503b6e445401accb6acbb2acdba6c18c047bfbb9a4d7e4302ff1548eaeade41e" exitCode=0 Nov 25 20:15:02 crc kubenswrapper[4926]: I1125 20:15:02.571154 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" event={"ID":"f080d2bf-e48b-43e6-9213-e12731ef519f","Type":"ContainerDied","Data":"503b6e445401accb6acbb2acdba6c18c047bfbb9a4d7e4302ff1548eaeade41e"} Nov 25 20:15:03 crc kubenswrapper[4926]: I1125 20:15:03.581702 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerStarted","Data":"fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848"} Nov 25 20:15:03 crc kubenswrapper[4926]: I1125 20:15:03.619958 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g6jfx" podStartSLOduration=1.948432125 podStartE2EDuration="4.619939707s" podCreationTimestamp="2025-11-25 20:14:59 +0000 UTC" firstStartedPulling="2025-11-25 20:15:00.520948365 +0000 UTC m=+7330.906461970" lastFinishedPulling="2025-11-25 20:15:03.192455957 +0000 UTC m=+7333.577969552" observedRunningTime="2025-11-25 20:15:03.610681217 +0000 UTC m=+7333.996194822" watchObservedRunningTime="2025-11-25 20:15:03.619939707 +0000 UTC m=+7334.005453312" Nov 25 20:15:03 crc kubenswrapper[4926]: I1125 20:15:03.947735 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.136635 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw2lf\" (UniqueName: \"kubernetes.io/projected/f080d2bf-e48b-43e6-9213-e12731ef519f-kube-api-access-nw2lf\") pod \"f080d2bf-e48b-43e6-9213-e12731ef519f\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.136925 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f080d2bf-e48b-43e6-9213-e12731ef519f-secret-volume\") pod \"f080d2bf-e48b-43e6-9213-e12731ef519f\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.137115 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f080d2bf-e48b-43e6-9213-e12731ef519f-config-volume\") pod \"f080d2bf-e48b-43e6-9213-e12731ef519f\" (UID: \"f080d2bf-e48b-43e6-9213-e12731ef519f\") " Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.138205 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f080d2bf-e48b-43e6-9213-e12731ef519f-config-volume" (OuterVolumeSpecName: "config-volume") pod "f080d2bf-e48b-43e6-9213-e12731ef519f" (UID: "f080d2bf-e48b-43e6-9213-e12731ef519f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.153673 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f080d2bf-e48b-43e6-9213-e12731ef519f-kube-api-access-nw2lf" (OuterVolumeSpecName: "kube-api-access-nw2lf") pod "f080d2bf-e48b-43e6-9213-e12731ef519f" (UID: "f080d2bf-e48b-43e6-9213-e12731ef519f"). InnerVolumeSpecName "kube-api-access-nw2lf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.158535 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f080d2bf-e48b-43e6-9213-e12731ef519f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f080d2bf-e48b-43e6-9213-e12731ef519f" (UID: "f080d2bf-e48b-43e6-9213-e12731ef519f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.240012 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw2lf\" (UniqueName: \"kubernetes.io/projected/f080d2bf-e48b-43e6-9213-e12731ef519f-kube-api-access-nw2lf\") on node \"crc\" DevicePath \"\"" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.240041 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f080d2bf-e48b-43e6-9213-e12731ef519f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.240051 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f080d2bf-e48b-43e6-9213-e12731ef519f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.595666 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.595666 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401695-9ncdx" event={"ID":"f080d2bf-e48b-43e6-9213-e12731ef519f","Type":"ContainerDied","Data":"90467b16db5b8d13fb522ea02c0f3d0e168f682b2fd66964129bf28b90d4114f"} Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.595893 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90467b16db5b8d13fb522ea02c0f3d0e168f682b2fd66964129bf28b90d4114f" Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.697625 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"] Nov 25 20:15:04 crc kubenswrapper[4926]: I1125 20:15:04.708474 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401650-5t6jr"] Nov 25 20:15:06 crc kubenswrapper[4926]: I1125 20:15:06.345787 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="874de401-fcce-4271-8d44-3df388134a89" path="/var/lib/kubelet/pods/874de401-fcce-4271-8d44-3df388134a89/volumes" Nov 25 20:15:09 crc kubenswrapper[4926]: I1125 20:15:09.803775 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:09 crc kubenswrapper[4926]: I1125 20:15:09.804562 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:09 crc kubenswrapper[4926]: I1125 20:15:09.891676 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:10 crc kubenswrapper[4926]: I1125 20:15:10.760664 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:10 crc kubenswrapper[4926]: I1125 20:15:10.832231 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g6jfx"] Nov 25 20:15:12 crc kubenswrapper[4926]: I1125 20:15:12.689056 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-g6jfx" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="registry-server" containerID="cri-o://fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848" gracePeriod=2 Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.192781 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.321252 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-catalog-content\") pod \"fc2aad25-650a-4363-8d84-bf03075d1d85\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.321351 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-utilities\") pod \"fc2aad25-650a-4363-8d84-bf03075d1d85\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.321474 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-969k4\" (UniqueName: \"kubernetes.io/projected/fc2aad25-650a-4363-8d84-bf03075d1d85-kube-api-access-969k4\") pod \"fc2aad25-650a-4363-8d84-bf03075d1d85\" (UID: \"fc2aad25-650a-4363-8d84-bf03075d1d85\") " Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.322767 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-utilities" (OuterVolumeSpecName: "utilities") pod "fc2aad25-650a-4363-8d84-bf03075d1d85" (UID: "fc2aad25-650a-4363-8d84-bf03075d1d85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.330393 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc2aad25-650a-4363-8d84-bf03075d1d85-kube-api-access-969k4" (OuterVolumeSpecName: "kube-api-access-969k4") pod "fc2aad25-650a-4363-8d84-bf03075d1d85" (UID: "fc2aad25-650a-4363-8d84-bf03075d1d85"). InnerVolumeSpecName "kube-api-access-969k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.338585 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc2aad25-650a-4363-8d84-bf03075d1d85" (UID: "fc2aad25-650a-4363-8d84-bf03075d1d85"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.423919 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-969k4\" (UniqueName: \"kubernetes.io/projected/fc2aad25-650a-4363-8d84-bf03075d1d85-kube-api-access-969k4\") on node \"crc\" DevicePath \"\"" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.423958 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.423969 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc2aad25-650a-4363-8d84-bf03075d1d85-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.705192 4926 generic.go:334] "Generic (PLEG): container finished" podID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerID="fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848" exitCode=0 Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.705265 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerDied","Data":"fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848"} Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.705621 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g6jfx" event={"ID":"fc2aad25-650a-4363-8d84-bf03075d1d85","Type":"ContainerDied","Data":"01c516f2505edb8fa5a3233c3dba495f753c36d65c7074ed8b9b84e0f6adcb7c"} Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.705317 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g6jfx" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.705704 4926 scope.go:117] "RemoveContainer" containerID="fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.730134 4926 scope.go:117] "RemoveContainer" containerID="c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.771659 4926 scope.go:117] "RemoveContainer" containerID="3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.816450 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g6jfx"] Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.816495 4926 scope.go:117] "RemoveContainer" containerID="fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848" Nov 25 20:15:13 crc kubenswrapper[4926]: E1125 20:15:13.817144 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848\": container with ID starting with fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848 not found: ID does not exist" containerID="fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.817259 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848"} err="failed to get container status \"fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848\": rpc error: code = NotFound desc = could not find container \"fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848\": container with ID starting with fd1897acb37b3085207a206ea10ee2551a8b9d98083e0b0f88e2cc1bb6016848 not found: ID does not exist" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.817480 4926 scope.go:117] "RemoveContainer" containerID="c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0" Nov 25 20:15:13 crc kubenswrapper[4926]: E1125 20:15:13.818872 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0\": container with ID starting with c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0 not found: ID does not exist" containerID="c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.819021 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0"} err="failed to get container status \"c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0\": rpc error: code = NotFound desc = could not find container \"c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0\": container with ID starting with c230b5659bf1fa0d3ea358af5b081f5f7e1a41d33e7221e8cd74e699b5732ff0 not found: ID does not exist" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.819137 4926 scope.go:117] "RemoveContainer" containerID="3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627" Nov 25 20:15:13 crc kubenswrapper[4926]: E1125 20:15:13.819815 4926 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627\": container with ID starting with 3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627 not found: ID does not exist" containerID="3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.819934 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627"} err="failed to get container status \"3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627\": rpc error: code = NotFound desc = could not find container \"3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627\": container with ID starting with 3a018debdf3c7e27792934bf4166f746f0c91e4538d6f33ee086cea411284627 not found: ID does not exist" Nov 25 20:15:13 crc kubenswrapper[4926]: I1125 20:15:13.827116 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-g6jfx"] Nov 25 20:15:14 crc kubenswrapper[4926]: I1125 20:15:14.345981 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" path="/var/lib/kubelet/pods/fc2aad25-650a-4363-8d84-bf03075d1d85/volumes" Nov 25 20:15:17 crc kubenswrapper[4926]: I1125 20:15:17.329422 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:15:17 crc kubenswrapper[4926]: E1125 20:15:17.330131 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:15:26 crc kubenswrapper[4926]: I1125 20:15:26.495083 4926 scope.go:117] "RemoveContainer" containerID="6a65ffbe0f74eefd3dd0f67e0a74ed9178c98e60be76f3ae986a4ff69380e78c" Nov 25 20:15:30 crc kubenswrapper[4926]: I1125 20:15:30.344216 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:15:30 crc kubenswrapper[4926]: E1125 20:15:30.345683 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:15:41 crc kubenswrapper[4926]: I1125 20:15:41.330534 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:15:41 crc kubenswrapper[4926]: E1125 20:15:41.331283 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" 
podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:15:54 crc kubenswrapper[4926]: I1125 20:15:54.331341 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:15:54 crc kubenswrapper[4926]: E1125 20:15:54.334167 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:16:07 crc kubenswrapper[4926]: I1125 20:16:07.330176 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:16:08 crc kubenswrapper[4926]: I1125 20:16:08.496977 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"c60ad3b624c8477448ef0ff379410dc5e7fce5a8bc2ae9c79b8d7d24f184dab8"} Nov 25 20:16:10 crc kubenswrapper[4926]: I1125 20:16:10.525820 4926 generic.go:334] "Generic (PLEG): container finished" podID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerID="79c08244b69a86eef6e95722d70c511ae8cb51386790a43cd47b3ba9af23af20" exitCode=0 Nov 25 20:16:10 crc kubenswrapper[4926]: I1125 20:16:10.525949 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" event={"ID":"a198c3ed-d652-4d90-87c1-ac0f117fd1f7","Type":"ContainerDied","Data":"79c08244b69a86eef6e95722d70c511ae8cb51386790a43cd47b3ba9af23af20"} Nov 25 20:16:10 crc kubenswrapper[4926]: I1125 20:16:10.527084 4926 scope.go:117] "RemoveContainer" containerID="79c08244b69a86eef6e95722d70c511ae8cb51386790a43cd47b3ba9af23af20" Nov 25 20:16:10 crc kubenswrapper[4926]: I1125 20:16:10.829745 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rtnrl_must-gather-sdwwr_a198c3ed-d652-4d90-87c1-ac0f117fd1f7/gather/0.log" Nov 25 20:16:20 crc kubenswrapper[4926]: I1125 20:16:20.261548 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rtnrl/must-gather-sdwwr"] Nov 25 20:16:20 crc kubenswrapper[4926]: I1125 20:16:20.262861 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="copy" containerID="cri-o://17965182a11249e6e4fc4c2080ba16d37afee597ff84ff2923f4df5ce92e7cb5" gracePeriod=2 Nov 25 20:16:20 crc kubenswrapper[4926]: I1125 20:16:20.274239 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rtnrl/must-gather-sdwwr"] Nov 25 20:16:25 crc kubenswrapper[4926]: I1125 20:16:25.226297 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rtnrl_must-gather-sdwwr_a198c3ed-d652-4d90-87c1-ac0f117fd1f7/copy/0.log" Nov 25 20:16:25 crc kubenswrapper[4926]: I1125 20:16:25.227140 4926 generic.go:334] "Generic (PLEG): container finished" podID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerID="17965182a11249e6e4fc4c2080ba16d37afee597ff84ff2923f4df5ce92e7cb5" exitCode=143 Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.210800 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-rtnrl_must-gather-sdwwr_a198c3ed-d652-4d90-87c1-ac0f117fd1f7/copy/0.log" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.211471 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.246605 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rtnrl_must-gather-sdwwr_a198c3ed-d652-4d90-87c1-ac0f117fd1f7/copy/0.log" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.249604 4926 scope.go:117] "RemoveContainer" containerID="17965182a11249e6e4fc4c2080ba16d37afee597ff84ff2923f4df5ce92e7cb5" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.249751 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rtnrl/must-gather-sdwwr" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.284564 4926 scope.go:117] "RemoveContainer" containerID="79c08244b69a86eef6e95722d70c511ae8cb51386790a43cd47b3ba9af23af20" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.292354 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptgbd\" (UniqueName: \"kubernetes.io/projected/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-kube-api-access-ptgbd\") pod \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.292598 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-must-gather-output\") pod \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\" (UID: \"a198c3ed-d652-4d90-87c1-ac0f117fd1f7\") " Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.303569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-kube-api-access-ptgbd" (OuterVolumeSpecName: "kube-api-access-ptgbd") pod "a198c3ed-d652-4d90-87c1-ac0f117fd1f7" (UID: "a198c3ed-d652-4d90-87c1-ac0f117fd1f7"). InnerVolumeSpecName "kube-api-access-ptgbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.395220 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptgbd\" (UniqueName: \"kubernetes.io/projected/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-kube-api-access-ptgbd\") on node \"crc\" DevicePath \"\"" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.507684 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "a198c3ed-d652-4d90-87c1-ac0f117fd1f7" (UID: "a198c3ed-d652-4d90-87c1-ac0f117fd1f7"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:16:26 crc kubenswrapper[4926]: I1125 20:16:26.599965 4926 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a198c3ed-d652-4d90-87c1-ac0f117fd1f7-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 20:16:28 crc kubenswrapper[4926]: I1125 20:16:28.354553 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" path="/var/lib/kubelet/pods/a198c3ed-d652-4d90-87c1-ac0f117fd1f7/volumes" Nov 25 20:17:26 crc kubenswrapper[4926]: I1125 20:17:26.638532 4926 scope.go:117] "RemoveContainer" containerID="211842072b5668367f74c52fb007cba637ce15c26b3d3134ec551350e7fbc409" Nov 25 20:18:33 crc kubenswrapper[4926]: I1125 20:18:33.541463 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:18:33 crc kubenswrapper[4926]: I1125 20:18:33.542200 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:19:03 crc kubenswrapper[4926]: I1125 20:19:03.542147 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:19:03 crc kubenswrapper[4926]: I1125 20:19:03.542920 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.541593 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.542105 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.542159 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.543009 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c60ad3b624c8477448ef0ff379410dc5e7fce5a8bc2ae9c79b8d7d24f184dab8"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" 
containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.543060 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://c60ad3b624c8477448ef0ff379410dc5e7fce5a8bc2ae9c79b8d7d24f184dab8" gracePeriod=600 Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.899334 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="c60ad3b624c8477448ef0ff379410dc5e7fce5a8bc2ae9c79b8d7d24f184dab8" exitCode=0 Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.899416 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"c60ad3b624c8477448ef0ff379410dc5e7fce5a8bc2ae9c79b8d7d24f184dab8"} Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.899666 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerStarted","Data":"4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a"} Nov 25 20:19:33 crc kubenswrapper[4926]: I1125 20:19:33.899685 4926 scope.go:117] "RemoveContainer" containerID="44af61f9e227eab07c26645c76673284a1b099a225552b2a4bbb0ef0c93cfdad" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.720856 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kmrgf"] Nov 25 20:21:26 crc kubenswrapper[4926]: E1125 20:21:26.721820 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="copy" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.721833 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="copy" Nov 25 20:21:26 crc kubenswrapper[4926]: E1125 20:21:26.721852 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="gather" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.721858 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="gather" Nov 25 20:21:26 crc kubenswrapper[4926]: E1125 20:21:26.721871 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="registry-server" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.721878 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="registry-server" Nov 25 20:21:26 crc kubenswrapper[4926]: E1125 20:21:26.721893 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="extract-content" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.721898 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="extract-content" Nov 25 20:21:26 crc kubenswrapper[4926]: E1125 20:21:26.721919 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="extract-utilities" Nov 25 20:21:26 crc 
kubenswrapper[4926]: I1125 20:21:26.721925 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="extract-utilities" Nov 25 20:21:26 crc kubenswrapper[4926]: E1125 20:21:26.721941 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f080d2bf-e48b-43e6-9213-e12731ef519f" containerName="collect-profiles" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.721946 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f080d2bf-e48b-43e6-9213-e12731ef519f" containerName="collect-profiles" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.722132 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="gather" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.722159 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f080d2bf-e48b-43e6-9213-e12731ef519f" containerName="collect-profiles" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.722173 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc2aad25-650a-4363-8d84-bf03075d1d85" containerName="registry-server" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.722180 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a198c3ed-d652-4d90-87c1-ac0f117fd1f7" containerName="copy" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.723606 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.771486 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kmrgf"] Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.868652 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z984q\" (UniqueName: \"kubernetes.io/projected/e5b855ff-f019-4a92-a4df-d42a17c14a30-kube-api-access-z984q\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.869015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-utilities\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.869281 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-catalog-content\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.915864 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pqj28"] Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.920112 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.949552 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pqj28"] Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.971738 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c84s\" (UniqueName: \"kubernetes.io/projected/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-kube-api-access-2c84s\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.971803 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z984q\" (UniqueName: \"kubernetes.io/projected/e5b855ff-f019-4a92-a4df-d42a17c14a30-kube-api-access-z984q\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.971835 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-utilities\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.971934 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-utilities\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.971997 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-catalog-content\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.972051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-catalog-content\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.972623 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-catalog-content\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.973777 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-utilities\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:26 crc kubenswrapper[4926]: I1125 20:21:26.994242 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z984q\" (UniqueName: \"kubernetes.io/projected/e5b855ff-f019-4a92-a4df-d42a17c14a30-kube-api-access-z984q\") pod \"certified-operators-kmrgf\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.071912 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.073667 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c84s\" (UniqueName: \"kubernetes.io/projected/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-kube-api-access-2c84s\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.073773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-utilities\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.073805 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-catalog-content\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.074234 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-catalog-content\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.074330 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-utilities\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.093698 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c84s\" (UniqueName: \"kubernetes.io/projected/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-kube-api-access-2c84s\") pod \"community-operators-pqj28\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.235503 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.743091 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pqj28"] Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.753861 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kmrgf"] Nov 25 20:21:27 crc kubenswrapper[4926]: W1125 20:21:27.766950 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5b855ff_f019_4a92_a4df_d42a17c14a30.slice/crio-e929495df70fbc91ae562482a9207de5105212dfd3d918f5feac0cb993e4c39a WatchSource:0}: Error finding container e929495df70fbc91ae562482a9207de5105212dfd3d918f5feac0cb993e4c39a: Status 404 returned error can't find the container with id e929495df70fbc91ae562482a9207de5105212dfd3d918f5feac0cb993e4c39a Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.935481 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerStarted","Data":"e929495df70fbc91ae562482a9207de5105212dfd3d918f5feac0cb993e4c39a"} Nov 25 20:21:27 crc kubenswrapper[4926]: I1125 20:21:27.939963 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerStarted","Data":"fa369f7cf1f6a03a8ca84b0d831cc39238e3bb87982265523c451db4ae9add3c"} Nov 25 20:21:28 crc kubenswrapper[4926]: I1125 20:21:28.958540 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2a7c769-c4f5-4f09-abb6-ac9d67356efa" containerID="4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9" exitCode=0 Nov 25 20:21:28 crc kubenswrapper[4926]: I1125 20:21:28.958638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerDied","Data":"4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9"} Nov 25 20:21:28 crc kubenswrapper[4926]: I1125 20:21:28.963817 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 20:21:28 crc kubenswrapper[4926]: I1125 20:21:28.968964 4926 generic.go:334] "Generic (PLEG): container finished" podID="e5b855ff-f019-4a92-a4df-d42a17c14a30" containerID="9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da" exitCode=0 Nov 25 20:21:28 crc kubenswrapper[4926]: I1125 20:21:28.969022 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerDied","Data":"9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da"} Nov 25 20:21:30 crc kubenswrapper[4926]: I1125 20:21:30.001727 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerStarted","Data":"a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053"} Nov 25 20:21:31 crc kubenswrapper[4926]: I1125 20:21:31.018440 4926 generic.go:334] "Generic (PLEG): container finished" podID="e5b855ff-f019-4a92-a4df-d42a17c14a30" containerID="a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053" exitCode=0 Nov 25 20:21:31 crc kubenswrapper[4926]: 
I1125 20:21:31.018540 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerDied","Data":"a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053"} Nov 25 20:21:31 crc kubenswrapper[4926]: I1125 20:21:31.025444 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerStarted","Data":"a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae"} Nov 25 20:21:32 crc kubenswrapper[4926]: I1125 20:21:32.037768 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2a7c769-c4f5-4f09-abb6-ac9d67356efa" containerID="a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae" exitCode=0 Nov 25 20:21:32 crc kubenswrapper[4926]: I1125 20:21:32.037893 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerDied","Data":"a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae"} Nov 25 20:21:32 crc kubenswrapper[4926]: I1125 20:21:32.042102 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerStarted","Data":"ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67"} Nov 25 20:21:32 crc kubenswrapper[4926]: I1125 20:21:32.088416 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kmrgf" podStartSLOduration=3.623385483 podStartE2EDuration="6.088397658s" podCreationTimestamp="2025-11-25 20:21:26 +0000 UTC" firstStartedPulling="2025-11-25 20:21:28.971075159 +0000 UTC m=+7719.356588784" lastFinishedPulling="2025-11-25 20:21:31.436087354 +0000 UTC m=+7721.821600959" observedRunningTime="2025-11-25 20:21:32.081960972 +0000 UTC m=+7722.467474587" watchObservedRunningTime="2025-11-25 20:21:32.088397658 +0000 UTC m=+7722.473911263" Nov 25 20:21:33 crc kubenswrapper[4926]: I1125 20:21:33.055234 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerStarted","Data":"8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360"} Nov 25 20:21:33 crc kubenswrapper[4926]: I1125 20:21:33.078753 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pqj28" podStartSLOduration=3.58654951 podStartE2EDuration="7.078731887s" podCreationTimestamp="2025-11-25 20:21:26 +0000 UTC" firstStartedPulling="2025-11-25 20:21:28.963487192 +0000 UTC m=+7719.349000807" lastFinishedPulling="2025-11-25 20:21:32.455669579 +0000 UTC m=+7722.841183184" observedRunningTime="2025-11-25 20:21:33.071740998 +0000 UTC m=+7723.457254603" watchObservedRunningTime="2025-11-25 20:21:33.078731887 +0000 UTC m=+7723.464245492" Nov 25 20:21:33 crc kubenswrapper[4926]: I1125 20:21:33.542050 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:21:33 crc kubenswrapper[4926]: I1125 20:21:33.542133 4926 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.072247 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.073451 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.167948 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.236965 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.237016 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.248339 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:37 crc kubenswrapper[4926]: I1125 20:21:37.323835 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:38 crc kubenswrapper[4926]: I1125 20:21:38.213255 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.307399 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kmrgf"] Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.307934 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kmrgf" podUID="e5b855ff-f019-4a92-a4df-d42a17c14a30" containerName="registry-server" containerID="cri-o://ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67" gracePeriod=2 Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.820299 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.911234 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-utilities\") pod \"e5b855ff-f019-4a92-a4df-d42a17c14a30\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.911365 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-catalog-content\") pod \"e5b855ff-f019-4a92-a4df-d42a17c14a30\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.911572 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z984q\" (UniqueName: \"kubernetes.io/projected/e5b855ff-f019-4a92-a4df-d42a17c14a30-kube-api-access-z984q\") pod \"e5b855ff-f019-4a92-a4df-d42a17c14a30\" (UID: \"e5b855ff-f019-4a92-a4df-d42a17c14a30\") " Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.913945 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-utilities" (OuterVolumeSpecName: "utilities") pod "e5b855ff-f019-4a92-a4df-d42a17c14a30" (UID: "e5b855ff-f019-4a92-a4df-d42a17c14a30"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.923134 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5b855ff-f019-4a92-a4df-d42a17c14a30-kube-api-access-z984q" (OuterVolumeSpecName: "kube-api-access-z984q") pod "e5b855ff-f019-4a92-a4df-d42a17c14a30" (UID: "e5b855ff-f019-4a92-a4df-d42a17c14a30"). InnerVolumeSpecName "kube-api-access-z984q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:21:39 crc kubenswrapper[4926]: I1125 20:21:39.973120 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5b855ff-f019-4a92-a4df-d42a17c14a30" (UID: "e5b855ff-f019-4a92-a4df-d42a17c14a30"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.016393 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.016421 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5b855ff-f019-4a92-a4df-d42a17c14a30-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.016434 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z984q\" (UniqueName: \"kubernetes.io/projected/e5b855ff-f019-4a92-a4df-d42a17c14a30-kube-api-access-z984q\") on node \"crc\" DevicePath \"\"" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.105215 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pqj28"] Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.150993 4926 generic.go:334] "Generic (PLEG): container finished" podID="e5b855ff-f019-4a92-a4df-d42a17c14a30" containerID="ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67" exitCode=0 Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.151139 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kmrgf" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.151228 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerDied","Data":"ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67"} Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.151288 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kmrgf" event={"ID":"e5b855ff-f019-4a92-a4df-d42a17c14a30","Type":"ContainerDied","Data":"e929495df70fbc91ae562482a9207de5105212dfd3d918f5feac0cb993e4c39a"} Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.151317 4926 scope.go:117] "RemoveContainer" containerID="ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.151518 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pqj28" podUID="b2a7c769-c4f5-4f09-abb6-ac9d67356efa" containerName="registry-server" containerID="cri-o://8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360" gracePeriod=2 Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.178508 4926 scope.go:117] "RemoveContainer" containerID="a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.198510 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kmrgf"] Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.215311 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kmrgf"] Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.228883 4926 scope.go:117] "RemoveContainer" containerID="9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.334330 4926 scope.go:117] "RemoveContainer" 
containerID="ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67" Nov 25 20:21:40 crc kubenswrapper[4926]: E1125 20:21:40.336515 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67\": container with ID starting with ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67 not found: ID does not exist" containerID="ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.336553 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67"} err="failed to get container status \"ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67\": rpc error: code = NotFound desc = could not find container \"ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67\": container with ID starting with ce6b17d9834b0c4a054b83ff115d81e55d0b2eca250a442dd8ee0ccb00a98b67 not found: ID does not exist" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.336578 4926 scope.go:117] "RemoveContainer" containerID="a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053" Nov 25 20:21:40 crc kubenswrapper[4926]: E1125 20:21:40.337498 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053\": container with ID starting with a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053 not found: ID does not exist" containerID="a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.338565 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053"} err="failed to get container status \"a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053\": rpc error: code = NotFound desc = could not find container \"a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053\": container with ID starting with a510811ce0ae87605e44456f9a82df82994f5ec73c394f6e2d7a6e11676cd053 not found: ID does not exist" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.338618 4926 scope.go:117] "RemoveContainer" containerID="9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da" Nov 25 20:21:40 crc kubenswrapper[4926]: E1125 20:21:40.340196 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da\": container with ID starting with 9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da not found: ID does not exist" containerID="9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.340224 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da"} err="failed to get container status \"9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da\": rpc error: code = NotFound desc = could not find container \"9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da\": container with ID starting with 
9a1e7f8e5635ff1cd4f0fbb2dd5c6602ff96a7b62b64d37a10806a8528bf10da not found: ID does not exist" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.342515 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5b855ff-f019-4a92-a4df-d42a17c14a30" path="/var/lib/kubelet/pods/e5b855ff-f019-4a92-a4df-d42a17c14a30/volumes" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.644052 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.734334 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-utilities\") pod \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.734563 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c84s\" (UniqueName: \"kubernetes.io/projected/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-kube-api-access-2c84s\") pod \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.734611 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-catalog-content\") pod \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\" (UID: \"b2a7c769-c4f5-4f09-abb6-ac9d67356efa\") " Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.735024 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-utilities" (OuterVolumeSpecName: "utilities") pod "b2a7c769-c4f5-4f09-abb6-ac9d67356efa" (UID: "b2a7c769-c4f5-4f09-abb6-ac9d67356efa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.735719 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.747767 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-kube-api-access-2c84s" (OuterVolumeSpecName: "kube-api-access-2c84s") pod "b2a7c769-c4f5-4f09-abb6-ac9d67356efa" (UID: "b2a7c769-c4f5-4f09-abb6-ac9d67356efa"). InnerVolumeSpecName "kube-api-access-2c84s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.791366 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2a7c769-c4f5-4f09-abb6-ac9d67356efa" (UID: "b2a7c769-c4f5-4f09-abb6-ac9d67356efa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.838083 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c84s\" (UniqueName: \"kubernetes.io/projected/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-kube-api-access-2c84s\") on node \"crc\" DevicePath \"\"" Nov 25 20:21:40 crc kubenswrapper[4926]: I1125 20:21:40.838118 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2a7c769-c4f5-4f09-abb6-ac9d67356efa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.167863 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2a7c769-c4f5-4f09-abb6-ac9d67356efa" containerID="8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360" exitCode=0 Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.167913 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqj28" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.167925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerDied","Data":"8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360"} Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.168494 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqj28" event={"ID":"b2a7c769-c4f5-4f09-abb6-ac9d67356efa","Type":"ContainerDied","Data":"fa369f7cf1f6a03a8ca84b0d831cc39238e3bb87982265523c451db4ae9add3c"} Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.168530 4926 scope.go:117] "RemoveContainer" containerID="8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.216197 4926 scope.go:117] "RemoveContainer" containerID="a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.223734 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pqj28"] Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.232507 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pqj28"] Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.260188 4926 scope.go:117] "RemoveContainer" containerID="4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.296968 4926 scope.go:117] "RemoveContainer" containerID="8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360" Nov 25 20:21:41 crc kubenswrapper[4926]: E1125 20:21:41.297733 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360\": container with ID starting with 8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360 not found: ID does not exist" containerID="8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.297797 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360"} err="failed to get container status 
\"8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360\": rpc error: code = NotFound desc = could not find container \"8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360\": container with ID starting with 8d7a24a2cc034d3f23511a5e59e770f93de2b34d89c989ea014e7f7675910360 not found: ID does not exist" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.297825 4926 scope.go:117] "RemoveContainer" containerID="a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae" Nov 25 20:21:41 crc kubenswrapper[4926]: E1125 20:21:41.298130 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae\": container with ID starting with a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae not found: ID does not exist" containerID="a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.298164 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae"} err="failed to get container status \"a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae\": rpc error: code = NotFound desc = could not find container \"a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae\": container with ID starting with a7b946dabde91eb952320f5fc99ac23ed4cdbbb8e0a6d1c68d860f459a2b44ae not found: ID does not exist" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.298183 4926 scope.go:117] "RemoveContainer" containerID="4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9" Nov 25 20:21:41 crc kubenswrapper[4926]: E1125 20:21:41.298518 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9\": container with ID starting with 4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9 not found: ID does not exist" containerID="4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9" Nov 25 20:21:41 crc kubenswrapper[4926]: I1125 20:21:41.298545 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9"} err="failed to get container status \"4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9\": rpc error: code = NotFound desc = could not find container \"4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9\": container with ID starting with 4c535f90ccdc3760590404abaae028fc9d8a3fa41c5da571ef2aa258c4ac4bf9 not found: ID does not exist" Nov 25 20:21:42 crc kubenswrapper[4926]: I1125 20:21:42.364864 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2a7c769-c4f5-4f09-abb6-ac9d67356efa" path="/var/lib/kubelet/pods/b2a7c769-c4f5-4f09-abb6-ac9d67356efa/volumes" Nov 25 20:22:03 crc kubenswrapper[4926]: I1125 20:22:03.541479 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:22:03 crc kubenswrapper[4926]: I1125 20:22:03.541994 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.542094 4926 patch_prober.go:28] interesting pod/machine-config-daemon-skdzg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.542628 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.542679 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.543499 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a"} pod="openshift-machine-config-operator/machine-config-daemon-skdzg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.543555 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerName="machine-config-daemon" containerID="cri-o://4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a" gracePeriod=600 Nov 25 20:22:33 crc kubenswrapper[4926]: E1125 20:22:33.674507 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.833856 4926 generic.go:334] "Generic (PLEG): container finished" podID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7" containerID="4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a" exitCode=0 Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.833922 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" event={"ID":"5655ebe9-673e-4e9e-ad75-edf6c92bddb7","Type":"ContainerDied","Data":"4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a"} Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.833959 4926 scope.go:117] "RemoveContainer" containerID="c60ad3b624c8477448ef0ff379410dc5e7fce5a8bc2ae9c79b8d7d24f184dab8" Nov 25 20:22:33 crc kubenswrapper[4926]: I1125 20:22:33.835016 4926 scope.go:117] "RemoveContainer" containerID="4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a" Nov 25 20:22:33 crc kubenswrapper[4926]: E1125 
Nov 25 20:22:47 crc kubenswrapper[4926]: I1125 20:22:47.329461 4926 scope.go:117] "RemoveContainer" containerID="4b24b4e1c30c7b3dd5c18142751fd0136d8a94c0c226bde8ff3eb5442afd558a"
Nov 25 20:22:47 crc kubenswrapper[4926]: E1125 20:22:47.330398 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skdzg_openshift-machine-config-operator(5655ebe9-673e-4e9e-ad75-edf6c92bddb7)\"" pod="openshift-machine-config-operator/machine-config-daemon-skdzg" podUID="5655ebe9-673e-4e9e-ad75-edf6c92bddb7"